559 files changed, 5393 insertions, 2985 deletions
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml index e287c8f..4165e7b 100644 --- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml +++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml @@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active format. For the single-planar API, applications must set <structfield> plane </structfield> to zero. Additional flags may be posted in the <structfield> flags </structfield> field. Refer to a manual for open() for details. -Currently only O_CLOEXEC is supported. All other fields must be set to zero. +Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All +other fields must be set to zero. In the case of multi-planar API, every plane is exported separately using multiple <constant> VIDIOC_EXPBUF </constant> calls. </para> @@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry> <entry>__u32</entry> <entry><structfield>flags</structfield></entry> <entry>Flags for the newly created file, currently only <constant> -O_CLOEXEC </constant> is supported, refer to the manual of open() for more -details.</entry> +O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY +</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual +of open() for more details.</entry> </row> <row> <entry>__s32</entry> diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt index f4faec0..2f2c6cd 100644 --- a/Documentation/assoc_array.txt +++ b/Documentation/assoc_array.txt @@ -164,10 +164,10 @@ This points to a number of methods, all of which need to be provided: (4) Diff the index keys of two objects. - int (*diff_objects)(const void *a, const void *b); + int (*diff_objects)(const void *object, const void *index_key); - Return the bit position at which the index keys of two objects differ or - -1 if they are the same. + Return the bit position at which the index key of the specified object + differs from the given index key or -1 if they are the same. (5) Free an object. diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt index 274752f..719320b 100644 --- a/Documentation/device-mapper/cache.txt +++ b/Documentation/device-mapper/cache.txt @@ -266,10 +266,12 @@ E.g. Invalidation is removing an entry from the cache without writing it back. Cache blocks can be invalidated via the invalidate_cblocks message, which takes an arbitrary number of cblock ranges. Each cblock -must be expressed as a decimal value, in the future a variant message -that takes cblock ranges expressed in hexidecimal may be needed to -better support efficient invalidation of larger caches. The cache must -be in passthrough mode when invalidate_cblocks is used. +range's end value is "one past the end", meaning 5-10 expresses a range +of values from 5 to 9. Each cblock must be expressed as a decimal +value, in the future a variant message that takes cblock ranges +expressed in hexidecimal may be needed to better support efficient +invalidation of larger caches. The cache must be in passthrough mode +when invalidate_cblocks is used. 
invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt index 48b259e..bad381f 100644 --- a/Documentation/devicetree/bindings/net/davinci_emac.txt +++ b/Documentation/devicetree/bindings/net/davinci_emac.txt @@ -4,7 +4,7 @@ This file provides information, what the device node for the davinci_emac interface contains. Required properties: -- compatible: "ti,davinci-dm6467-emac"; +- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac" - reg: Offset and length of the register set for the device - ti,davinci-ctrl-reg-offset: offset to control register - ti,davinci-ctrl-mod-reg-offset: offset to control module register diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt index 953049b..5a41a865 100644 --- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt +++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt @@ -8,3 +8,7 @@ Required properties: Optional properties: - phy-device : phandle to Ethernet phy - local-mac-address : Ethernet mac address to use +- reg-io-width : Mask of sizes (in bytes) of the IO accesses that + are supported on the device. Valid value for SMSC LAN91c111 are + 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning + 16-bit access only. diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt new file mode 100644 index 0000000..2b40e04 --- /dev/null +++ b/Documentation/module-signing.txt @@ -0,0 +1,240 @@ + ============================== + KERNEL MODULE SIGNING FACILITY + ============================== + +CONTENTS + + - Overview. + - Configuring module signing. + - Generating signing keys. + - Public keys in the kernel. + - Manually signing modules. + - Signed modules and stripping. + - Loading signed modules. + - Non-valid signatures and unsigned modules. + - Administering/protecting the private key. + + +======== +OVERVIEW +======== + +The kernel module signing facility cryptographically signs modules during +installation and then checks the signature upon loading the module. This +allows increased kernel security by disallowing the loading of unsigned modules +or modules signed with an invalid key. Module signing increases security by +making it harder to load a malicious module into the kernel. The module +signature checking is done by the kernel so that it is not necessary to have +trusted userspace bits. + +This facility uses X.509 ITU-T standard certificates to encode the public keys +involved. The signatures are not themselves encoded in any industrial standard +type. The facility currently only supports the RSA public key encryption +standard (though it is pluggable and permits others to be used). The possible +hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and +SHA-512 (the algorithm is selected by data in the signature). + + +========================== +CONFIGURING MODULE SIGNING +========================== + +The module signing facility is enabled by going to the "Enable Loadable Module +Support" section of the kernel configuration and turning on + + CONFIG_MODULE_SIG "Module signature verification" + +This has a number of options available: + + (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE) + + This specifies how the kernel should deal with a module that has a + signature for which the key is not known or a module that is unsigned. + + If this is off (ie. 
"permissive"), then modules for which the key is not + available and modules that are unsigned are permitted, but the kernel will + be marked as being tainted. + + If this is on (ie. "restrictive"), only modules that have a valid + signature that can be verified by a public key in the kernel's possession + will be loaded. All other modules will generate an error. + + Irrespective of the setting here, if the module has a signature block that + cannot be parsed, it will be rejected out of hand. + + + (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL) + + If this is on then modules will be automatically signed during the + modules_install phase of a build. If this is off, then the modules must + be signed manually using: + + scripts/sign-file + + + (3) "Which hash algorithm should modules be signed with?" + + This presents a choice of which hash algorithm the installation phase will + sign the modules with: + + CONFIG_SIG_SHA1 "Sign modules with SHA-1" + CONFIG_SIG_SHA224 "Sign modules with SHA-224" + CONFIG_SIG_SHA256 "Sign modules with SHA-256" + CONFIG_SIG_SHA384 "Sign modules with SHA-384" + CONFIG_SIG_SHA512 "Sign modules with SHA-512" + + The algorithm selected here will also be built into the kernel (rather + than being a module) so that modules signed with that algorithm can have + their signatures checked without causing a dependency loop. + + +======================= +GENERATING SIGNING KEYS +======================= + +Cryptographic keypairs are required to generate and check signatures. A +private key is used to generate a signature and the corresponding public key is +used to check it. The private key is only needed during the build, after which +it can be deleted or stored securely. The public key gets built into the +kernel so that it can be used to check the signatures as the modules are +loaded. + +Under normal conditions, the kernel build will automatically generate a new +keypair using openssl if one does not exist in the files: + + signing_key.priv + signing_key.x509 + +during the building of vmlinux (the public part of the key needs to be built +into vmlinux) using parameters in the: + + x509.genkey + +file (which is also generated if it does not already exist). + +It is strongly recommended that you provide your own x509.genkey file. + +Most notably, in the x509.genkey file, the req_distinguished_name section +should be altered from the default: + + [ req_distinguished_name ] + O = Magrathea + CN = Glacier signing key + emailAddress = slartibartfast@magrathea.h2g2 + +The generated RSA key size can also be set with: + + [ req ] + default_bits = 4096 + + +It is also possible to manually generate the key private/public files using the +x509.genkey key generation configuration file in the root node of the Linux +kernel sources tree and the openssl command. The following is an example to +generate the public/private key files: + + openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \ + -config x509.genkey -outform DER -out signing_key.x509 \ + -keyout signing_key.priv + + +========================= +PUBLIC KEYS IN THE KERNEL +========================= + +The kernel contains a ring of public keys that can be viewed by root. They're +in a keyring called ".system_keyring" that can be seen by: + + [root@deneb ~]# cat /proc/keys + ... + 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1 + 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 [] + ... 
+ +Beyond the public key generated specifically for module signing, any file +placed in the kernel source root directory or the kernel build root directory +whose name is suffixed with ".x509" will be assumed to be an X.509 public key +and will be added to the keyring. + +Further, the architecture code may take public keys from a hardware store and +add those in also (e.g. from the UEFI key database). + +Finally, it is possible to add additional public keys by doing: + + keyctl padd asymmetric "" [.system_keyring-ID] <[key-file] + +e.g.: + + keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509 + +Note, however, that the kernel will only permit keys to be added to +.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key +that is already resident in the .system_keyring at the time the key was added. + + +========================= +MANUALLY SIGNING MODULES +========================= + +To manually sign a module, use the scripts/sign-file tool available in +the Linux kernel source tree. The script requires 4 arguments: + + 1. The hash algorithm (e.g., sha256) + 2. The private key filename + 3. The public key filename + 4. The kernel module to be signed + +The following is an example to sign a kernel module: + + scripts/sign-file sha512 kernel-signkey.priv \ + kernel-signkey.x509 module.ko + +The hash algorithm used does not have to match the one configured, but if it +doesn't, you should make sure that hash algorithm is either built into the +kernel or can be loaded without requiring itself. + + +============================ +SIGNED MODULES AND STRIPPING +============================ + +A signed module has a digital signature simply appended at the end. The string +"~Module signature appended~." at the end of the module's file confirms that a +signature is present but it does not confirm that the signature is valid! + +Signed modules are BRITTLE as the signature is outside of the defined ELF +container. Thus they MAY NOT be stripped once the signature is computed and +attached. Note the entire module is the signed payload, including any and all +debug information present at the time of signing. + + +====================== +LOADING SIGNED MODULES +====================== + +Modules are loaded with insmod, modprobe, init_module() or finit_module(), +exactly as for unsigned modules as no processing is done in userspace. The +signature checking is all done within the kernel. + + +========================================= +NON-VALID SIGNATURES AND UNSIGNED MODULES +========================================= + +If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on +the kernel command line, the kernel will only load validly signed modules +for which it has a public key. Otherwise, it will also load modules that are +unsigned. Any module for which the kernel has a key, but which proves to have +a signature mismatch will not be permitted to load. + +Any module that has an unparseable signature will be rejected. + + +========================================= +ADMINISTERING/PROTECTING THE PRIVATE KEY +========================================= + +Since the private key is used to sign modules, viruses and malware could use +the private key to sign modules and compromise the operating system. The +private key must be either destroyed or moved to a secure location and not kept +in the root node of the kernel source tree. 
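The new module-signing.txt above notes that a signed module simply carries the string "~Module signature appended~" at the very end of the file, and that the marker only proves a signature block is present, not that it is valid. A minimal userspace sketch of that check follows; it is not part of the patch, and the program name, the 64-byte tail buffer and the use of the glibc memmem() helper are illustrative assumptions rather than anything the kernel provides:

	/*
	 * check-modsig.c: report whether a kernel module file ends with the
	 * "~Module signature appended~" marker described in module-signing.txt.
	 * Presence of the marker does NOT mean the signature is valid; only
	 * the kernel can verify that when the module is loaded.
	 */
	#define _GNU_SOURCE		/* for memmem() */
	#include <stdio.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		static const char marker[] = "~Module signature appended~";
		char tail[64];
		size_t got;
		long size;
		FILE *f;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
			return 2;
		}

		f = fopen(argv[1], "rb");
		if (!f) {
			perror(argv[1]);
			return 2;
		}

		/* Read at most the last 64 bytes of the file. */
		fseek(f, 0, SEEK_END);
		size = ftell(f);
		if (size > (long)sizeof(tail))
			fseek(f, size - (long)sizeof(tail), SEEK_SET);
		else
			fseek(f, 0, SEEK_SET);
		got = fread(tail, 1, sizeof(tail), f);
		fclose(f);

		/* The marker sits at (or very near) the end of a signed module. */
		if (got && memmem(tail, got, marker, strlen(marker))) {
			printf("%s: signature marker present\n", argv[1]);
			return 0;
		}
		printf("%s: no signature marker\n", argv[1]);
		return 1;
	}

Compiled with an ordinary host compiler (for example, gcc -o check-modsig check-modsig.c), this can be pointed at any installed .ko to see whether the CONFIG_MODULE_SIG_ALL step of modules_install actually appended a signature; whether that signature is valid is still decided only by the kernel at load time.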
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 3c12d9a..8a984e9 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -16,8 +16,12 @@ ip_default_ttl - INTEGER Default: 64 (as recommended by RFC1700) ip_no_pmtu_disc - BOOLEAN - Disable Path MTU Discovery. - default FALSE + Disable Path MTU Discovery. If enabled and a + fragmentation-required ICMP is received, the PMTU to this + destination will be set to min_pmtu (see below). You will need + to raise min_pmtu to the smallest interface MTU on your system + manually if you want to avoid locally generated fragments. + Default: FALSE min_pmtu - INTEGER default 552 - minimum discovered Path MTU diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index c012236..8e48e3b 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below. [shutdown] close() --------> destruction of the transmission socket and deallocation of all associated resources. +Socket creation and destruction is also straight forward, and is done +the same way as in capturing described in the previous paragraph: + + int fd = socket(PF_PACKET, mode, 0); + +The protocol can optionally be 0 in case we only want to transmit +via this socket, which avoids an expensive call to packet_rcv(). +In this case, you also need to bind(2) the TX_RING with sll_protocol = 0 +set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example. + Binding the socket to your network interface is mandatory (with zero copy) to know the header size of frames used in the circular buffer. diff --git a/MAINTAINERS b/MAINTAINERS index 9486fb6..d5e4ff3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE +M: Shawn Guo <shawn.guo@linaro.org> M: Sascha Hauer <kernel@pengutronix.de> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.pengutronix.de/git/imx/linux-2.6.git +T: git git://git.linaro.org/people/shawnguo/linux-2.6.git F: arch/arm/mach-imx/ +F: arch/arm/boot/dts/imx* F: arch/arm/configs/imx*_defconfig -ARM/FREESCALE IMX6 -M: Shawn Guo <shawn.guo@linaro.org> -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained -T: git git://git.linaro.org/people/shawnguo/linux-2.6.git -F: arch/arm/mach-imx/*imx6* - ARM/FREESCALE MXS ARM ARCHITECTURE M: Shawn Guo <shawn.guo@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1013,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ +F: drivers/clk/keystone/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/LOGICPD PXA270 MACHINE SUPPORT M: Lennert Buytenhek <kernel@wantstofly.org> @@ -3766,9 +3763,11 @@ F: include/uapi/linux/gigaset_dev.h GPIO SUBSYSTEM M: Linus Walleij <linus.walleij@linaro.org> -S: Maintained +M: Alexandre Courbot <gnurou@gmail.com> L: linux-gpio@vger.kernel.org -F: Documentation/gpio.txt +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git +S: Maintained +F: Documentation/gpio/ F: drivers/gpio/ F: include/linux/gpio* F: include/asm-generic/gpio.h @@ -3836,6 +3835,12 @@ T: git 
git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/gspca/ +GUID PARTITION TABLE (GPT) +M: Davidlohr Bueso <davidlohr@hp.com> +L: linux-efi@vger.kernel.org +S: Maintained +F: block/partitions/efi.* + STK1160 USB VIDEO CAPTURE DRIVER M: Ezequiel Garcia <elezegarcia@gmail.com> L: linux-media@vger.kernel.org @@ -4471,10 +4476,8 @@ M: Bruce Allan <bruce.w.allan@intel.com> M: Carolyn Wyborny <carolyn.wyborny@intel.com> M: Don Skidmore <donald.c.skidmore@intel.com> M: Greg Rose <gregory.v.rose@intel.com> -M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> M: Alex Duyck <alexander.h.duyck@intel.com> M: John Ronciak <john.ronciak@intel.com> -M: Tushar Dave <tushar.n.dave@intel.com> L: e1000-devel@lists.sourceforge.net W: http://www.intel.com/support/feedback.htm W: http://e1000.sourceforge.net/ @@ -5918,12 +5921,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com> M: Herbert Xu <herbert@gondor.apana.org.au> M: "David S. Miller" <davem@davemloft.net> L: netdev@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git S: Maintained F: net/xfrm/ F: net/key/ F: net/ipv4/xfrm* +F: net/ipv4/esp4.c +F: net/ipv4/ah4.c +F: net/ipv4/ipcomp.c +F: net/ipv4/ip_vti.c F: net/ipv6/xfrm* +F: net/ipv6/esp6.c +F: net/ipv6/ah6.c +F: net/ipv6/ipcomp6.c +F: net/ipv6/ip6_vti.c F: include/uapi/linux/xfrm.h F: include/net/xfrm.h @@ -6470,19 +6482,52 @@ F: drivers/pci/ F: include/linux/pci* F: arch/x86/pci/ +PCI DRIVER FOR IMX6 +M: Richard Zhu <r65037@freescale.com> +M: Shawn Guo <shawn.guo@linaro.org> +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/pci/host/*imx6* + +PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) +M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +M: Jason Cooper <jason@lakedaemon.net> +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/pci/host/*mvebu* + PCI DRIVER FOR NVIDIA TEGRA M: Thierry Reding <thierry.reding@gmail.com> L: linux-tegra@vger.kernel.org +L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt F: drivers/pci/host/pci-tegra.c +PCI DRIVER FOR RENESAS R-CAR +M: Simon Horman <horms@verge.net.au> +L: linux-pci@vger.kernel.org +L: linux-sh@vger.kernel.org +S: Maintained +F: drivers/pci/host/*rcar* + PCI DRIVER FOR SAMSUNG EXYNOS M: Jingoo Han <jg1.han@samsung.com> L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: drivers/pci/host/pci-exynos.c +PCI DRIVER FOR SYNOPSIS DESIGNWARE +M: Mohit Kumar <mohit.kumar@st.com> +M: Jingoo Han <jg1.han@samsung.com> +L: linux-pci@vger.kernel.org +S: Maintained +F: drivers/pci/host/*designware* + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org @@ -9545,7 +9590,7 @@ F: drivers/xen/*swiotlb* XFS FILESYSTEM P: Silicon Graphics Inc -M: Dave Chinner <dchinner@fromorbit.com> +M: Dave Chinner <david@fromorbit.com> M: Ben Myers <bpm@sgi.com> M: xfs@oss.sgi.com L: xfs@oss.sgi.com @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc5 NAME = One Giant Leap for Frogkind # *DOCUMENTATION* @@ -732,19 +732,15 @@ export 
mod_strip_cmd # Select initial ramdisk compression format, default is gzip(1). # This shall be used by the dracut(8) tool while creating an initramfs image. # -INITRD_COMPRESS=gzip -ifeq ($(CONFIG_RD_BZIP2), y) - INITRD_COMPRESS=bzip2 -else ifeq ($(CONFIG_RD_LZMA), y) - INITRD_COMPRESS=lzma -else ifeq ($(CONFIG_RD_XZ), y) - INITRD_COMPRESS=xz -else ifeq ($(CONFIG_RD_LZO), y) - INITRD_COMPRESS=lzo -else ifeq ($(CONFIG_RD_LZ4), y) - INITRD_COMPRESS=lz4 -endif -export INITRD_COMPRESS +INITRD_COMPRESS-y := gzip +INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2 +INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma +INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz +INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo +INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 +# do not export INITRD_COMPRESS, since we didn't actually +# choose a sane default compression above. +# export INITRD_COMPRESS := $(INITRD_COMPRESS-y) ifdef CONFIG_MODULE_SIG_ALL MODSECKEY = ./signing_key.priv diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 2ee0c9b..9063ae6 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -8,6 +8,7 @@ config ARC def_bool y + select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev select DEVTMPFS if !INITRAMFS_SOURCE="" diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 6f30484..39e58d1 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -8,6 +8,13 @@ /******** no-legacy-syscalls-ABI *******/ +/* + * Non-typical guard macro to enable inclusion twice in ARCH sys.c + * That is how the Generic syscall wrapper generator works + */ +#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) +#define _UAPI_ASM_ARC_UNISTD_H + #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK @@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls) /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ #define __NR_sysfs (__NR_arch_specific_syscall + 3) __SYSCALL(__NR_sysfs, sys_sysfs) + +#undef __SYSCALL + +#endif diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index e46d81f..63177e4 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config) cache_result = (config >> 16) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; - if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX) + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; - if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX) + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index e99dfaf..03fcbf0 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -7,11 +7,11 @@ */ /dts-v1/; -#include "omap34xx.dtsi" +#include "am3517.dtsi" / { - model = "TI AM3517 EVM (AM3517/05)"; - compatible = "ti,am3517-evm", "ti,omap3"; + model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)"; + compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3"; memory { device_type = "memory"; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi new file mode 100644 index 0000000..2fbe02f --- /dev/null +++ b/arch/arm/boot/dts/am3517.dtsi @@ -0,0 +1,63 @@ +/* + * Device Tree Source for am3517 SoC + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * + * This file is licensed under the 
terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include "omap3.dtsi" + +/ { + aliases { + serial3 = &uart4; + }; + + ocp { + am35x_otg_hs: am35x_otg_hs@5c040000 { + compatible = "ti,omap3-musb"; + ti,hwmods = "am35x_otg_hs"; + status = "disabled"; + reg = <0x5c040000 0x1000>; + interrupts = <71>; + interrupt-names = "mc"; + }; + + davinci_emac: ethernet@0x5c000000 { + compatible = "ti,am3517-emac"; + ti,hwmods = "davinci_emac"; + status = "disabled"; + reg = <0x5c000000 0x30000>; + interrupts = <67 68 69 70>; + ti,davinci-ctrl-reg-offset = <0x10000>; + ti,davinci-ctrl-mod-reg-offset = <0>; + ti,davinci-ctrl-ram-offset = <0x20000>; + ti,davinci-ctrl-ram-size = <0x2000>; + ti,davinci-rmii-en = /bits/ 8 <1>; + local-mac-address = [ 00 00 00 00 00 00 ]; + }; + + davinci_mdio: ethernet@0x5c030000 { + compatible = "ti,davinci_mdio"; + ti,hwmods = "davinci_mdio"; + status = "disabled"; + reg = <0x5c030000 0x1000>; + bus_freq = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + uart4: serial@4809e000 { + compatible = "ti,omap3-uart"; + ti,hwmods = "uart4"; + status = "disabled"; + reg = <0x4809e000 0x400>; + interrupts = <84>; + dmas = <&sdma 55 &sdma 54>; + dma-names = "tx", "rx"; + clock-frequency = <48000000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index c2c306d..6fc85f9 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -9,7 +9,7 @@ /dts-v1/; -#include "omap34xx.dtsi" +#include "omap34xx-hs.dtsi" / { model = "Nokia N900"; diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 94eb77d..5c26c18 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -8,7 +8,7 @@ * published by the Free Software Foundation. 
*/ -#include "omap36xx.dtsi" +#include "omap36xx-hs.dtsi" / { cpus { diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi new file mode 100644 index 0000000..1ff6264 --- /dev/null +++ b/arch/arm/boot/dts/omap34xx-hs.dtsi @@ -0,0 +1,16 @@ +/* Disabled modules for secure omaps */ + +#include "omap34xx.dtsi" + +/* Secure omaps have some devices inaccessible depending on the firmware */ +&aes { + status = "disabled"; +}; + +&sham { + status = "disabled"; +}; + +&timer12 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi new file mode 100644 index 0000000..2c7febb --- /dev/null +++ b/arch/arm/boot/dts/omap36xx-hs.dtsi @@ -0,0 +1,16 @@ +/* Disabled modules for secure omaps */ + +#include "omap36xx.dtsi" + +/* Secure omaps have some devices inaccessible depending on the firmware */ +&aes { + status = "disabled"; +}; + +&sham { + status = "disabled"; +}; + +&timer12 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index ee845fa..46e1d7e 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -87,9 +87,9 @@ interrupts = <1 9 0xf04>; }; - gpio0: gpio@ffc40000 { + gpio0: gpio@e6050000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc40000 0 0x2c>; + reg = <0 0xe6050000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 4 0x4>; #gpio-cells = <2>; @@ -99,9 +99,9 @@ interrupt-controller; }; - gpio1: gpio@ffc41000 { + gpio1: gpio@e6051000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc41000 0 0x2c>; + reg = <0 0xe6051000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 5 0x4>; #gpio-cells = <2>; @@ -111,9 +111,9 @@ interrupt-controller; }; - gpio2: gpio@ffc42000 { + gpio2: gpio@e6052000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc42000 0 0x2c>; + reg = <0 0xe6052000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 6 0x4>; #gpio-cells = <2>; @@ -123,9 +123,9 @@ interrupt-controller; }; - gpio3: gpio@ffc43000 { + gpio3: gpio@e6053000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc43000 0 0x2c>; + reg = <0 0xe6053000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 7 0x4>; #gpio-cells = <2>; @@ -135,9 +135,9 @@ interrupt-controller; }; - gpio4: gpio@ffc44000 { + gpio4: gpio@e6054000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc44000 0 0x2c>; + reg = <0 0xe6054000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 8 0x4>; #gpio-cells = <2>; @@ -147,9 +147,9 @@ interrupt-controller; }; - gpio5: gpio@ffc45000 { + gpio5: gpio@e6055000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc45000 0 0x2c>; + reg = <0 0xe6055000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 9 0x4>; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index c1751a6..7f5878c 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -193,7 +193,10 @@ pio: pinctrl@01c20800 { compatible = "allwinner,sun6i-a31-pinctrl"; reg = <0x01c20800 0x400>; - interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>; + interrupts = <0 11 4>, + <0 15 4>, + <0 16 4>, + <0 17 4>; clocks = <&apb1_gates 5>; gpio-controller; interrupt-controller; @@ -212,11 +215,11 @@ timer@01c20c00 { compatible = "allwinner,sun4i-timer"; reg = <0x01c20c00 0xa0>; - interrupts = <0 18 1>, - <0 19 1>, - <0 20 1>, - <0 21 1>, - <0 22 1>; + 
interrupts = <0 18 4>, + <0 19 4>, + <0 20 4>, + <0 21 4>, + <0 22 4>; clocks = <&osc24M>; }; @@ -228,7 +231,7 @@ uart0: serial@01c28000 { compatible = "snps,dw-apb-uart"; reg = <0x01c28000 0x400>; - interrupts = <0 0 1>; + interrupts = <0 0 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 16>; @@ -238,7 +241,7 @@ uart1: serial@01c28400 { compatible = "snps,dw-apb-uart"; reg = <0x01c28400 0x400>; - interrupts = <0 1 1>; + interrupts = <0 1 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 17>; @@ -248,7 +251,7 @@ uart2: serial@01c28800 { compatible = "snps,dw-apb-uart"; reg = <0x01c28800 0x400>; - interrupts = <0 2 1>; + interrupts = <0 2 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 18>; @@ -258,7 +261,7 @@ uart3: serial@01c28c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c28c00 0x400>; - interrupts = <0 3 1>; + interrupts = <0 3 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 19>; @@ -268,7 +271,7 @@ uart4: serial@01c29000 { compatible = "snps,dw-apb-uart"; reg = <0x01c29000 0x400>; - interrupts = <0 4 1>; + interrupts = <0 4 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 20>; @@ -278,7 +281,7 @@ uart5: serial@01c29400 { compatible = "snps,dw-apb-uart"; reg = <0x01c29400 0x400>; - interrupts = <0 5 1>; + interrupts = <0 5 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 21>; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e46cfed..367611a 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -170,7 +170,7 @@ emac: ethernet@01c0b000 { compatible = "allwinner,sun4i-emac"; reg = <0x01c0b000 0x1000>; - interrupts = <0 55 1>; + interrupts = <0 55 4>; clocks = <&ahb_gates 17>; status = "disabled"; }; @@ -186,7 +186,7 @@ pio: pinctrl@01c20800 { compatible = "allwinner,sun7i-a20-pinctrl"; reg = <0x01c20800 0x400>; - interrupts = <0 28 1>; + interrupts = <0 28 4>; clocks = <&apb0_gates 5>; gpio-controller; interrupt-controller; @@ -251,12 +251,12 @@ timer@01c20c00 { compatible = "allwinner,sun4i-timer"; reg = <0x01c20c00 0x90>; - interrupts = <0 22 1>, - <0 23 1>, - <0 24 1>, - <0 25 1>, - <0 67 1>, - <0 68 1>; + interrupts = <0 22 4>, + <0 23 4>, + <0 24 4>, + <0 25 4>, + <0 67 4>, + <0 68 4>; clocks = <&osc24M>; }; @@ -273,7 +273,7 @@ uart0: serial@01c28000 { compatible = "snps,dw-apb-uart"; reg = <0x01c28000 0x400>; - interrupts = <0 1 1>; + interrupts = <0 1 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 16>; @@ -283,7 +283,7 @@ uart1: serial@01c28400 { compatible = "snps,dw-apb-uart"; reg = <0x01c28400 0x400>; - interrupts = <0 2 1>; + interrupts = <0 2 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 17>; @@ -293,7 +293,7 @@ uart2: serial@01c28800 { compatible = "snps,dw-apb-uart"; reg = <0x01c28800 0x400>; - interrupts = <0 3 1>; + interrupts = <0 3 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 18>; @@ -303,7 +303,7 @@ uart3: serial@01c28c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c28c00 0x400>; - interrupts = <0 4 1>; + interrupts = <0 4 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 19>; @@ -313,7 +313,7 @@ uart4: serial@01c29000 { compatible = "snps,dw-apb-uart"; reg = <0x01c29000 0x400>; - interrupts = <0 17 1>; + interrupts = <0 17 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 20>; @@ -323,7 +323,7 @@ uart5: serial@01c29400 { compatible = "snps,dw-apb-uart"; reg = <0x01c29400 0x400>; - interrupts = <0 18 1>; + interrupts = <0 18 4>; 
reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 21>; @@ -333,7 +333,7 @@ uart6: serial@01c29800 { compatible = "snps,dw-apb-uart"; reg = <0x01c29800 0x400>; - interrupts = <0 19 1>; + interrupts = <0 19 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 22>; @@ -343,7 +343,7 @@ uart7: serial@01c29c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c29c00 0x400>; - interrupts = <0 20 1>; + interrupts = <0 20 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 23>; @@ -353,7 +353,7 @@ i2c0: i2c@01c2ac00 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2ac00 0x400>; - interrupts = <0 7 1>; + interrupts = <0 7 4>; clocks = <&apb1_gates 0>; clock-frequency = <100000>; status = "disabled"; @@ -362,7 +362,7 @@ i2c1: i2c@01c2b000 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b000 0x400>; - interrupts = <0 8 1>; + interrupts = <0 8 4>; clocks = <&apb1_gates 1>; clock-frequency = <100000>; status = "disabled"; @@ -371,7 +371,7 @@ i2c2: i2c@01c2b400 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b400 0x400>; - interrupts = <0 9 1>; + interrupts = <0 9 4>; clocks = <&apb1_gates 2>; clock-frequency = <100000>; status = "disabled"; @@ -380,7 +380,7 @@ i2c3: i2c@01c2b800 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b800 0x400>; - interrupts = <0 88 1>; + interrupts = <0 88 4>; clocks = <&apb1_gates 3>; clock-frequency = <100000>; status = "disabled"; @@ -389,7 +389,7 @@ i2c4: i2c@01c2bc00 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2bc00 0x400>; - interrupts = <0 89 1>; + interrupts = <0 89 4>; clocks = <&apb1_gates 15>; clock-frequency = <100000>; status = "disabled"; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 9ecccc8..6976b03 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -100,23 +100,19 @@ #define TASK_UNMAPPED_BASE UL(0x00000000) #endif -#ifndef PHYS_OFFSET -#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) -#endif - #ifndef END_MEM #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET -#define PAGE_OFFSET (PHYS_OFFSET) +#define PAGE_OFFSET PLAT_PHYS_OFFSET #endif /* * The module can be at any place in ram in nommu mode. */ #define MODULES_END (END_MEM) -#define MODULES_VADDR (PHYS_OFFSET) +#define MODULES_VADDR PAGE_OFFSET #define XIP_VIRT_ADDR(physaddr) (physaddr) @@ -157,6 +153,16 @@ #endif #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) +/* + * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical + * memory. This is used for XIP and NoMMU kernels, or by kernels which + * have their own mach/memory.h. Assembly code must always use + * PLAT_PHYS_OFFSET and not PHYS_OFFSET. 
+ */ +#ifndef PLAT_PHYS_OFFSET +#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) +#endif + #ifndef __ASSEMBLY__ /* @@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #else +#define PHYS_OFFSET PLAT_PHYS_OFFSET + static inline phys_addr_t __virt_to_phys(unsigned long x) { return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; @@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #endif #endif -#endif /* __ASSEMBLY__ */ - -#ifndef PHYS_OFFSET -#ifdef PLAT_PHYS_OFFSET -#define PHYS_OFFSET PLAT_PHYS_OFFSET -#else -#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) -#endif -#endif - -#ifndef __ASSEMBLY__ /* * PFNs are used to describe any physical page; this means diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 14235ba..716249c 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -68,7 +68,7 @@ ENTRY(stext) #ifdef CONFIG_ARM_MPU /* Calculate the size of a region covering just the kernel */ - ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET + ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET ldr r6, =(_end) @ Cover whole kernel sub r6, r6, r5 @ Minimum size of region to map clz r6, r6 @ Region size must be 2^N... @@ -213,7 +213,7 @@ ENTRY(__setup_mpu) set_region_nr r0, #MPU_RAM_REGION isb /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ - ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET + ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 11d59b3..32f317e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -110,7 +110,7 @@ ENTRY(stext) sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) add r8, r8, r4 @ PHYS_OFFSET #else - ldr r8, =PHYS_OFFSET @ always constant in this case + ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case #endif /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 94f6b05..92f7b15 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; + unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; @@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p) frame.sp = thread_saved_sp(p); frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(p); + stack_page = (unsigned long)task_stack_page(p); do { - int ret = unwind_frame(&frame); - if (ret < 0) + if (frame.sp < stack_page || + frame.sp >= stack_page + THREAD_SIZE || + unwind_frame(&frame) < 0) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6a1b8a8..987a7f5 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p) machine_desc = mdesc; machine_name = mdesc->name; - setup_dma_zone(mdesc); - if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; @@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p) sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); + setup_dma_zone(mdesc); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 
00f79e5..af4e8c8 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame) high = ALIGN(low, THREAD_SIZE); /* check current frame pointer is within bounds */ - if (fp < (low + 12) || fp + 4 >= high) + if (fp < low + 12 || fp > high - 4) return -EINVAL; /* restore the registers from the stack frame */ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index dbf0923..7940241 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -509,9 +509,10 @@ static inline int __do_cache_op(unsigned long start, unsigned long end) { int ret; - unsigned long chunk = PAGE_SIZE; do { + unsigned long chunk = min(PAGE_SIZE, end - start); + if (signal_pending(current)) { struct thread_info *ti = current_thread_info(); diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index c46eccb..78829c5 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -487,7 +487,7 @@ int __init da8xx_register_emac(void) static struct resource da830_mcasp1_resources[] = { { - .name = "mcasp1", + .name = "mpu", .start = DAVINCI_DA830_MCASP1_REG_BASE, .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, @@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = { static struct resource da850_mcasp_resources[] = { { - .name = "mcasp", + .name = "mpu", .start = DAVINCI_DA8XX_MCASP0_REG_BASE, .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index ef9ff1f..6117fc6 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = { static struct resource dm355_asp1_resources[] = { { + .name = "mpu", .start = DAVINCI_ASP1_BASE, .end = DAVINCI_ASP1_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, @@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = { int __init dm355_gpio_register(void) { return davinci_gpio_register(dm355_gpio_resources, - sizeof(dm355_gpio_resources), + ARRAY_SIZE(dm355_gpio_resources), &dm355_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 1511a06..d7c6f85 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = { int __init dm365_gpio_register(void) { return davinci_gpio_register(dm365_gpio_resources, - sizeof(dm365_gpio_resources), + ARRAY_SIZE(dm365_gpio_resources), &dm365_gpio_platform_data); } @@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = { static struct resource dm365_asp_resources[] = { { + .name = "mpu", .start = DAVINCI_DM365_ASP0_BASE, .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 143a321..3ce4799 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = { /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ static struct resource dm644x_asp_resources[] = { { + .name = "mpu", .start = DAVINCI_ASP0_BASE, .end = DAVINCI_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, @@ -792,7 +793,7 @@ static struct 
davinci_gpio_platform_data dm644_gpio_platform_data = { int __init dm644x_gpio_register(void) { return davinci_gpio_register(dm644_gpio_resources, - sizeof(dm644_gpio_resources), + ARRAY_SIZE(dm644_gpio_resources), &dm644_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 2a73f29..0e81fea65 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = { static struct resource dm646x_mcasp0_resources[] = { { - .name = "mcasp0", + .name = "mpu", .start = DAVINCI_DM646X_MCASP0_REG_BASE, .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, .flags = IORESOURCE_MEM, @@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = { static struct resource dm646x_mcasp1_resources[] = { { - .name = "mcasp1", + .name = "mpu", .start = DAVINCI_DM646X_MCASP1_REG_BASE, .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, .flags = IORESOURCE_MEM, @@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { int __init dm646x_gpio_register(void) { return davinci_gpio_register(dm646x_gpio_resources, - sizeof(dm646x_gpio_resources), + ARRAY_SIZE(dm646x_gpio_resources), &dm646x_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index b3d7e56..bd3bf66 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -17,12 +17,15 @@ #include <linux/clkdev.h> #include <linux/clocksource.h> #include <linux/dma-mapping.h> +#include <linux/input.h> #include <linux/io.h> #include <linux/irqchip.h> +#include <linux/mailbox.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of_address.h> +#include <linux/reboot.h> #include <linux/amba/bus.h> #include <linux/platform_device.h> @@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = { .name = "cpuidle-calxeda", }; +static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data) +{ + u32 key = *(u32 *)data; + + if (event != 0x1000) + return 0; + + if (key == KEY_POWER) + orderly_poweroff(false); + else if (key == 0xffff) + ctrl_alt_del(); + + return 0; +} +static struct notifier_block hb_keys_nb = { + .notifier_call = hb_keys_notifier, +}; + static void __init highbank_init(void) { struct device_node *np; @@ -145,6 +166,8 @@ static void __init highbank_init(void) bus_register_notifier(&platform_bus_type, &highbank_platform_nb); bus_register_notifier(&amba_bustype, &highbank_amba_nb); + pl320_ipc_register_notifier(&hb_keys_nb); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); if (psci_ops.cpu_suspend) diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 19f1652..8d972ff1 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)") .dt_compat = omap3_gp_boards_compat, .restart = omap3xxx_restart, MACHINE_END + +static const char *am3517_boards_compat[] __initdata = { + "ti,am3517", + NULL, +}; + +DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)") + .reserve = omap_reserve, + .map_io = omap3_map_io, + .init_early = am35xx_init_early, + .init_irq = omap_intc_of_init, + 
.handle_irq = omap3_intc_handle_irq, + .init_machine = omap_generic_init, + .init_late = omap3_init_late, + .init_time = omap3_gptimer_timer_init, + .dt_compat = am3517_boards_compat, + .restart = omap3xxx_restart, +MACHINE_END #endif #ifdef CONFIG_SOC_AM33XX diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 58347bb..4cf1655 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { { "dss_hdmi", "omapdss_hdmi", -1 }, }; +static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) +{ + u32 enable_mask, enable_shift; + u32 pipd_mask, pipd_shift; + u32 reg; + + if (dsi_id == 0) { + enable_mask = OMAP4_DSI1_LANEENABLE_MASK; + enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; + pipd_mask = OMAP4_DSI1_PIPD_MASK; + pipd_shift = OMAP4_DSI1_PIPD_SHIFT; + } else if (dsi_id == 1) { + enable_mask = OMAP4_DSI2_LANEENABLE_MASK; + enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; + pipd_mask = OMAP4_DSI2_PIPD_MASK; + pipd_shift = OMAP4_DSI2_PIPD_SHIFT; + } else { + return -ENODEV; + } + + reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); + + reg &= ~enable_mask; + reg &= ~pipd_mask; + + reg |= (lanes << enable_shift) & enable_mask; + reg |= (lanes << pipd_shift) & pipd_mask; + + omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); + + return 0; +} + static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) { + if (cpu_is_omap44xx()) + return omap4_dsi_mux_pads(dsi_id, lane_mask); + return 0; } static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) { + if (cpu_is_omap44xx()) + omap4_dsi_mux_pads(dsi_id, 0); } static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 53f0735..e0a398c 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev) odbfd_exit1: kfree(hwmods); odbfd_exit: + /* if data/we are at fault.. 
load up a fail handler */ + if (ret) + pdev->dev.pm_domain = &omap_device_fail_pm_domain; + return ret; } @@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } + +static int _od_fail_runtime_suspend(struct device *dev) +{ + dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); + return -ENODEV; +} + +static int _od_fail_runtime_resume(struct device *dev) +{ + dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); + return -ENODEV; +} + #endif #ifdef CONFIG_SUSPEND @@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev) #define _od_resume_noirq NULL #endif +struct dev_pm_domain omap_device_fail_pm_domain = { + .ops = { + SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend, + _od_fail_runtime_resume, NULL) + } +}; + struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h index 17ca1ae..78c02b3 100644 --- a/arch/arm/mach-omap2/omap_device.h +++ b/arch/arm/mach-omap2/omap_device.h @@ -29,6 +29,7 @@ #include "omap_hwmod.h" extern struct dev_pm_domain omap_device_pm_domain; +extern struct dev_pm_domain omap_device_fail_pm_domain; /* omap_device._state values */ #define OMAP_DEVICE_STATE_UNKNOWN 0 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index e3f0eca..8a1b5e0 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v) } /** - * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v + * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v * @oh: struct omap_hwmod * * @v: pointer to register contents to modify * @@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) } /** + * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v + * @oh: struct omap_hwmod * + * @v: pointer to register contents to modify + * + * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon + * error or 0 upon success. 
+ */ +static int _clear_softreset(struct omap_hwmod *oh, u32 *v) +{ + u32 softrst_mask; + + if (!oh->class->sysc || + !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) + return -EINVAL; + + if (!oh->class->sysc->sysc_fields) { + WARN(1, + "omap_hwmod: %s: sysc_fields absent for sysconfig class\n", + oh->name); + return -EINVAL; + } + + softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); + + *v &= ~softrst_mask; + + return 0; +} + +/** * _wait_softreset_complete - wait for an OCP softreset to complete * @oh: struct omap_hwmod * to wait on * @@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh) pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", oh->name, os->clk); ret = -EINVAL; + continue; } os->_clk = c; /* @@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh) pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", oh->name, oc->clk); ret = -EINVAL; + continue; } oc->_clk = c; /* @@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh) ret = _set_softreset(oh, &v); if (ret) goto dis_opt_clks; + + _write_sysconfig(v, oh); + ret = _clear_softreset(oh, &v); + if (ret) + goto dis_opt_clks; + _write_sysconfig(v, oh); if (oh->class->sysc->srst_udelay) @@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh) return 0; } +static int of_dev_find_hwmod(struct device_node *np, + struct omap_hwmod *oh) +{ + int count, i, res; + const char *p; + + count = of_property_count_strings(np, "ti,hwmods"); + if (count < 1) + return -ENODEV; + + for (i = 0; i < count; i++) { + res = of_property_read_string_index(np, "ti,hwmods", + i, &p); + if (res) + continue; + if (!strcmp(p, oh->name)) { + pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n", + np->name, i, oh->name); + return i; + } + } + + return -ENODEV; +} + /** * of_dev_hwmod_lookup - look up needed hwmod from dt blob * @np: struct device_node * * @oh: struct omap_hwmod * + * @index: index of the entry found + * @found: struct device_node * found or NULL * * Parse the dt blob and find out needed hwmod. Recursive function is * implemented to take care hierarchical dt blob parsing. - * Return: The device node on success or NULL on failure. + * Return: Returns 0 on success, -ENODEV when not found. 
*/ -static struct device_node *of_dev_hwmod_lookup(struct device_node *np, - struct omap_hwmod *oh) +static int of_dev_hwmod_lookup(struct device_node *np, + struct omap_hwmod *oh, + int *index, + struct device_node **found) { - struct device_node *np0 = NULL, *np1 = NULL; - const char *p; + struct device_node *np0 = NULL; + int res; + + res = of_dev_find_hwmod(np, oh); + if (res >= 0) { + *found = np; + *index = res; + return 0; + } for_each_child_of_node(np, np0) { - if (of_find_property(np0, "ti,hwmods", NULL)) { - p = of_get_property(np0, "ti,hwmods", NULL); - if (!strcmp(p, oh->name)) - return np0; - np1 = of_dev_hwmod_lookup(np0, oh); - if (np1) - return np1; + struct device_node *fc; + int i; + + res = of_dev_hwmod_lookup(np0, oh, &i, &fc); + if (res == 0) { + *found = fc; + *index = i; + return 0; } } - return NULL; + + *found = NULL; + *index = 0; + + return -ENODEV; } /** * _init_mpu_rt_base - populate the virtual address for a hwmod * @oh: struct omap_hwmod * to locate the virtual address * @data: (unused, caller should pass NULL) + * @index: index of the reg entry iospace in device tree * @np: struct device_node * of the IP block's device node in the DT data * * Cache the virtual address used by the MPU to access this IP block's @@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np, * -ENXIO on absent or invalid register target address space. */ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, - struct device_node *np) + int index, struct device_node *np) { struct omap_hwmod_addr_space *mem; void __iomem *va_start = NULL; @@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, if (!np) return -ENXIO; - va_start = of_iomap(np, oh->mpu_rt_idx); + va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); } if (!va_start) { - pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); + if (mem) + pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); + else + pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n", + oh->name, index, np->full_name); return -ENXIO; } @@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, */ static int __init _init(struct omap_hwmod *oh, void *data) { - int r; + int r, index; struct device_node *np = NULL; if (oh->_state != _HWMOD_STATE_REGISTERED) return 0; - if (of_have_populated_dt()) - np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); + if (of_have_populated_dt()) { + struct device_node *bus; + + bus = of_find_node_by_name(NULL, "ocp"); + if (!bus) + return -ENODEV; + + r = of_dev_hwmod_lookup(bus, oh, &index, &np); + if (r) + pr_debug("omap_hwmod: %s missing dt data\n", oh->name); + else if (np && index) + pr_warn("omap_hwmod: %s using broken dt data from %s\n", + oh->name, np->name); + } if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, np); + r = _init_mpu_rt_base(oh, NULL, index, np); if (r < 0) { WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", oh->name); @@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh) goto error; _write_sysconfig(v, oh); + ret = _clear_softreset(oh, &v); + if (ret) + goto error; + _write_sysconfig(v, oh); + error: return ret; } diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 9e56fab..d337429 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1943,7 
+1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | - SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | + SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, @@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. - */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 1e5b12c..3318cae9 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = { .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | - SYSC_HAS_SOFTRESET), + SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), @@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. - */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index 9e08d69..e297d62 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | - SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), + SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), @@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. 
- */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, .main_clk = "l3init_60m_fclk", .prcm = { .omap4 = { diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 0d5dd64..263b152 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -13,6 +13,7 @@ #include <mach/regs-ost.h> #include <mach/reset.h> +#include <mach/smemc.h> unsigned int reset_status; EXPORT_SYMBOL(reset_status); @@ -81,6 +82,12 @@ static void do_hw_reset(void) writel_relaxed(OSSR_M3, OSSR); /* ... in 100 ms */ writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); + /* + * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71) + * we put SDRAM into self-refresh to prevent that + */ + while (1) + writel_relaxed(MDREFR_SLFRSH, MDREFR); } void pxa_restart(enum reboot_mode mode, const char *cmd) @@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd) break; } } - diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 0206b91..ef5557b 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = { * Tosa Keyboard */ static const uint32_t tosakbd_keymap[] = { - KEY(0, 2, KEY_W), - KEY(0, 6, KEY_K), - KEY(0, 7, KEY_BACKSPACE), - KEY(0, 8, KEY_P), - KEY(1, 1, KEY_Q), - KEY(1, 2, KEY_E), - KEY(1, 3, KEY_T), - KEY(1, 4, KEY_Y), - KEY(1, 6, KEY_O), - KEY(1, 7, KEY_I), - KEY(1, 8, KEY_COMMA), - KEY(2, 1, KEY_A), - KEY(2, 2, KEY_D), - KEY(2, 3, KEY_G), - KEY(2, 4, KEY_U), - KEY(2, 6, KEY_L), - KEY(2, 7, KEY_ENTER), - KEY(2, 8, KEY_DOT), - KEY(3, 1, KEY_Z), - KEY(3, 2, KEY_C), - KEY(3, 3, KEY_V), - KEY(3, 4, KEY_J), - KEY(3, 5, TOSA_KEY_ADDRESSBOOK), - KEY(3, 6, TOSA_KEY_CANCEL), - KEY(3, 7, TOSA_KEY_CENTER), - KEY(3, 8, TOSA_KEY_OK), - KEY(3, 9, KEY_LEFTSHIFT), - KEY(4, 1, KEY_S), - KEY(4, 2, KEY_R), - KEY(4, 3, KEY_B), - KEY(4, 4, KEY_N), - KEY(4, 5, TOSA_KEY_CALENDAR), - KEY(4, 6, TOSA_KEY_HOMEPAGE), - KEY(4, 7, KEY_LEFTCTRL), - KEY(4, 8, TOSA_KEY_LIGHT), - KEY(4, 10, KEY_RIGHTSHIFT), - KEY(5, 1, KEY_TAB), - KEY(5, 2, KEY_SLASH), - KEY(5, 3, KEY_H), - KEY(5, 4, KEY_M), - KEY(5, 5, TOSA_KEY_MENU), - KEY(5, 7, KEY_UP), - KEY(5, 11, TOSA_KEY_FN), - KEY(6, 1, KEY_X), - KEY(6, 2, KEY_F), - KEY(6, 3, KEY_SPACE), - KEY(6, 4, KEY_APOSTROPHE), - KEY(6, 5, TOSA_KEY_MAIL), - KEY(6, 6, KEY_LEFT), - KEY(6, 7, KEY_DOWN), - KEY(6, 8, KEY_RIGHT), + KEY(0, 1, KEY_W), + KEY(0, 5, KEY_K), + KEY(0, 6, KEY_BACKSPACE), + KEY(0, 7, KEY_P), + KEY(1, 0, KEY_Q), + KEY(1, 1, KEY_E), + KEY(1, 2, KEY_T), + KEY(1, 3, KEY_Y), + KEY(1, 5, KEY_O), + KEY(1, 6, KEY_I), + KEY(1, 7, KEY_COMMA), + KEY(2, 0, KEY_A), + KEY(2, 1, KEY_D), + KEY(2, 2, KEY_G), + KEY(2, 3, KEY_U), + KEY(2, 5, KEY_L), + KEY(2, 6, KEY_ENTER), + KEY(2, 7, KEY_DOT), + KEY(3, 0, KEY_Z), + KEY(3, 1, KEY_C), + KEY(3, 2, KEY_V), + KEY(3, 3, KEY_J), + KEY(3, 4, TOSA_KEY_ADDRESSBOOK), + KEY(3, 5, TOSA_KEY_CANCEL), + KEY(3, 6, TOSA_KEY_CENTER), + KEY(3, 7, TOSA_KEY_OK), + KEY(3, 8, KEY_LEFTSHIFT), + KEY(4, 0, KEY_S), + KEY(4, 1, KEY_R), + KEY(4, 2, KEY_B), + KEY(4, 3, KEY_N), + KEY(4, 4, TOSA_KEY_CALENDAR), + KEY(4, 5, TOSA_KEY_HOMEPAGE), + KEY(4, 6, KEY_LEFTCTRL), + KEY(4, 7, TOSA_KEY_LIGHT), + KEY(4, 9, KEY_RIGHTSHIFT), + KEY(5, 0, KEY_TAB), + KEY(5, 1, KEY_SLASH), + KEY(5, 2, KEY_H), + KEY(5, 3, KEY_M), + KEY(5, 4, TOSA_KEY_MENU), + KEY(5, 6, KEY_UP), + KEY(5, 10, TOSA_KEY_FN), + KEY(6, 0, KEY_X), + KEY(6, 1, KEY_F), + KEY(6, 2, KEY_SPACE), + KEY(6, 3, KEY_APOSTROPHE), 
+ KEY(6, 4, TOSA_KEY_MAIL), + KEY(6, 5, KEY_LEFT), + KEY(6, 6, KEY_DOWN), + KEY(6, 7, KEY_RIGHT), }; static struct matrix_keymap_data tosakbd_keymap_data = { diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 7eb9a10..2fddf38 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c @@ -8,8 +8,6 @@ * published by the Free Software Foundation. */ -#include <linux/clk-provider.h> -#include <linux/irqchip.h> #include <linux/of_platform.h> #include <asm/mach/arch.h> @@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void) panic("SoC is not S3C64xx!"); } -static void __init s3c64xx_dt_init_irq(void) -{ - of_clk_init(NULL); - samsung_wdt_reset_of_init(); - irqchip_init(); -}; - static void __init s3c64xx_dt_init_machine(void) { + samsung_wdt_reset_of_init(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } @@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)") /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ .dt_compat = s3c64xx_dt_compat, .map_io = s3c64xx_dt_map_io, - .init_irq = s3c64xx_dt_init_irq, .init_machine = s3c64xx_dt_init_machine, .restart = s3c64xx_dt_restart, MACHINE_END diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index a8d3ce6..e0406fd 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -245,7 +245,9 @@ static void __init lager_init(void) { lager_add_standard_devices(); - phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); + if (IS_ENABLED(CONFIG_PHYLIB)) + phy_register_fixup_for_id("r8a7790-ether-ff:01", + lager_ksz8041_fixup); } static const char * const lager_boards_compat_dt[] __initconst = { diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c index 9a4e910..3a9c1f1 100644 --- a/arch/arm/mach-tegra/fuse.c +++ b/arch/arm/mach-tegra/fuse.c @@ -198,10 +198,12 @@ void __init tegra_init_fuse(void) switch (tegra_chip_id) { case TEGRA20: tegra20_fuse_init_randomness(); + break; case TEGRA30: case TEGRA114: default: tegra30_fuse_init_randomness(); + break; } pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f6b6bfa..f61a570 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = { }; EXPORT_SYMBOL(arm_coherent_dma_ops); +static int __dma_supported(struct device *dev, u64 mask, bool warn) +{ + unsigned long max_dma_pfn; + + /* + * If the mask allows for more memory than we can address, + * and we actually have that much memory, then we must + * indicate that DMA to this device is not supported. + */ + if (sizeof(mask) != sizeof(dma_addr_t) && + mask > (dma_addr_t)~0 && + dma_to_pfn(dev, ~0) < max_pfn) { + if (warn) { + dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", + mask); + dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); + } + return 0; + } + + max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); + + /* + * Translate the device's DMA mask to a PFN limit. This + * PFN number includes the page which we can DMA to. 
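The dev_warn() strings above point drivers at dma_set_coherent_mask(); as a rough sketch of the driver-side contract they refer to (the helper name here is hypothetical, not taken from the patch):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical probe-time helper: ask for a wide coherent mask,
     * fall back to 32 bits, and honour the return value instead of
     * assuming the request was accepted. */
    static int foo_setup_dma(struct device *dev)
    {
            int ret;

            ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
            if (ret)
                    ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
            if (ret)
                    dev_warn(dev, "no usable coherent DMA mask\n");

            return ret;
    }

A driver that ignores the return value here is exactly the case the new warning in __dma_supported() is trying to flag.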
+ */ + if (dma_to_pfn(dev, mask) < max_dma_pfn) { + if (warn) + dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", + mask, + dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, + max_dma_pfn + 1); + return 0; + } + + return 1; +} + static u64 get_coherent_dma_mask(struct device *dev) { u64 mask = (u64)DMA_BIT_MASK(32); if (dev) { - unsigned long max_dma_pfn; - mask = dev->coherent_dma_mask; /* @@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); - - /* - * If the mask allows for more memory than we can address, - * and we actually have that much memory, then fail the - * allocation. - */ - if (sizeof(mask) != sizeof(dma_addr_t) && - mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) > max_dma_pfn) { - dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", - mask); - dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); - return 0; - } - - /* - * Now check that the mask, when translated to a PFN, - * fits within the allowable addresses which we can - * allocate. - */ - if (dma_to_pfn(dev, mask) < max_dma_pfn) { - dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", - mask, - dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, - arm_dma_pfn_limit + 1); + if (!__dma_supported(dev, mask, true)) return 0; - } } return mask; @@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, */ int dma_supported(struct device *dev, u64 mask) { - unsigned long limit; - - /* - * If the mask allows for more memory than we can address, - * and we actually have that much memory, then we must - * indicate that DMA to this device is not supported. - */ - if (sizeof(mask) != sizeof(dma_addr_t) && - mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) - return 0; - - /* - * Translate the device's DMA mask to a PFN limit. This - * PFN number includes the page which we can DMA to. 
- */ - limit = dma_to_pfn(dev, mask); - - if (limit < arm_dma_pfn_limit) - return 0; - - return 1; + return __dma_supported(dev, mask, false); } EXPORT_SYMBOL(dma_supported); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3e8f106e..1f7b19a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) #ifdef CONFIG_ZONE_DMA if (mdesc->dma_zone_size) { arm_dma_zone_size = mdesc->dma_zone_size; - arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; + arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 83e4f95..8550123 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, struct remap_data *info = data; struct page *page = info->pages[info->index++]; unsigned long pfn = page_to_pfn(page); - pte_t pte = pfn_pte(pfn, info->prot); + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); if (map_foreign_page(pfn, info->fgmfn, info->domid)) return -EFAULT; @@ -224,10 +224,10 @@ static int __init xen_guest_init(void) } if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) return 0; - xen_hvm_resume_frames = res.start >> PAGE_SHIFT; + xen_hvm_resume_frames = res.start; xen_events_irq = irq_of_parse_and_map(node, 0); pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", - version, xen_events_irq, xen_hvm_resume_frames); + version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); xen_domain_type = XEN_HVM_DOMAIN; xen_setup_features(); diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index 2820f1a..dde3fc9 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); } static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); } static inline void xen_dma_sync_single_for_device(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); } #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6777a21..6a8928b 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, { int err, len, type, disabled = !ctrl.enabled; - if (disabled) { - len = 0; - type = HW_BREAKPOINT_EMPTY; - } else { - err = arch_bp_generic_fields(ctrl, &len, &type); - if (err) - return err; - - switch (note_type) { - case NT_ARM_HW_BREAK: - if ((type & HW_BREAKPOINT_X) != type) - return -EINVAL; - break; - case NT_ARM_HW_WATCH: - if ((type & HW_BREAKPOINT_RW) != type) - return 
-EINVAL; - break; - default: + attr->disabled = disabled; + if (disabled) + return 0; + + err = arch_bp_generic_fields(ctrl, &len, &type); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) return -EINVAL; - } + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: + return -EINVAL; } attr->bp_len = len; attr->bp_type = type; - attr->disabled = disabled; return 0; } diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b7..bc23b1b 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); +extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, + struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, + struct kvmppc_book3s_shadow_vcpu *svcpu); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348..192917d 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -79,6 +79,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong scratch2; u8 in_guest; u8 restore_hid5; u8 napping; @@ -106,6 +107,7 @@ struct kvmppc_host_state { }; struct kvmppc_book3s_shadow_vcpu { + bool in_use; ulong gpr[14]; u32 cr; u32 xer; diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06b..7bdcf34 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); +int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, - uint32_t addr, uint32_t *data, uint32_t sz); + uint32_t addr, __be32 *data, uint32_t sz); int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); int64_t opal_manage_flash(uint8_t op); int64_t opal_update_flash(uint64_t blk_list); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee1261..aace905 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); -extern void switch_booke_debug_regs(struct thread_struct *new_thread); +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc0..d3de010 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ 
b/arch/powerpc/kernel/asm-offsets.c @@ -576,6 +576,7 @@ int main(void) HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c..11c1d06 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) { unsigned long addr; - const u32 *basep, *sizep; + const __be32 *basep, *sizep; unsigned int rtas_start = 0, rtas_end = 0; basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); sizep = of_get_property(rtas.dev, "rtas-size", NULL); if (basep && sizep) { - rtas_start = *basep; - rtas_end = *basep + *sizep; + rtas_start = be32_to_cpup(basep); + rtas_end = rtas_start + be32_to_cpup(sizep); } for (addr = begin; addr < end; addr += PAGE_SIZE) { diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8a..4a96556 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) #endif } -static void prime_debug_regs(struct thread_struct *thread) +static void prime_debug_regs(struct debug_reg *debug) { /* * We could have inherited MSR_DE from userspace, since @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) */ mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->debug.iac1); - mtspr(SPRN_IAC2, thread->debug.iac2); + mtspr(SPRN_IAC1, debug->iac1); + mtspr(SPRN_IAC2, debug->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - mtspr(SPRN_IAC3, thread->debug.iac3); - mtspr(SPRN_IAC4, thread->debug.iac4); + mtspr(SPRN_IAC3, debug->iac3); + mtspr(SPRN_IAC4, debug->iac4); #endif - mtspr(SPRN_DAC1, thread->debug.dac1); - mtspr(SPRN_DAC2, thread->debug.dac2); + mtspr(SPRN_DAC1, debug->dac1); + mtspr(SPRN_DAC2, debug->dac2); #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - mtspr(SPRN_DVC1, thread->debug.dvc1); - mtspr(SPRN_DVC2, thread->debug.dvc2); + mtspr(SPRN_DVC1, debug->dvc1); + mtspr(SPRN_DVC2, debug->dvc2); #endif - mtspr(SPRN_DBCR0, thread->debug.dbcr0); - mtspr(SPRN_DBCR1, thread->debug.dbcr1); + mtspr(SPRN_DBCR0, debug->dbcr0); + mtspr(SPRN_DBCR1, debug->dbcr1); #ifdef CONFIG_BOOKE - mtspr(SPRN_DBCR2, thread->debug.dbcr2); + mtspr(SPRN_DBCR2, debug->dbcr2); #endif } /* @@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) * debug registers, set the debug registers from the values * stored in the new thread. 
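The prototype change just below (struct debug_reg * instead of struct thread_struct *) matters mostly for KVM, shown later in the booke.c hunk: the caller only needs the small debug block on its stack across guest entry, not a whole thread_struct. A rough sketch of such a caller, with a hypothetical function name and under the assumption of the booke KVM context:

    #include <linux/kvm_host.h>
    #include <linux/sched.h>
    #include <asm/switch_to.h>

    /* Illustrative caller shape enabled by the narrower argument:
     * only a struct debug_reg lives on the stack. */
    static void run_with_guest_debug_regs(struct kvm_vcpu *vcpu)
    {
            struct debug_reg host = current->thread.debug;

            switch_booke_debug_regs(&vcpu->arch.shadow_dbg_reg);
            current->thread.debug = vcpu->arch.shadow_dbg_reg;

            /* ... enter and run the guest here ... */

            switch_booke_debug_regs(&host);
            current->thread.debug = host;
    }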
*/ -void switch_booke_debug_regs(struct thread_struct *new_thread) +void switch_booke_debug_regs(struct debug_reg *new_debug) { if ((current->thread.debug.dbcr0 & DBCR0_IDM) - || (new_thread->debug.dbcr0 & DBCR0_IDM)) - prime_debug_regs(new_thread); + || (new_debug->dbcr0 & DBCR0_IDM)) + prime_debug_regs(new_debug); } EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ @@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS - switch_booke_debug_regs(&new->thread); + switch_booke_debug_regs(&new->thread.debug); #else /* * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb404..2e3d2bf 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.fp_state.fpr, + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); else tmp = child->thread.fp_state.fpscr; @@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.fp_state.fpr, &data, + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); else child->thread.fp_state.fpscr = data; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc804..bc76cc6 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && (dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; - const unsigned int *ireg; + const __be32 *ireg; num_addr_cell = of_n_addr_cells(dn); num_size_cell = of_n_size_cells(dn); @@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) if (!ireg) goto out; - maxcpus = ireg[num_addr_cell + num_size_cell]; + maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); /* Double maxcpus for processors which have SMT capability */ if (cpu_has_feature(CPU_FTR_SMT)) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3..c1cf4a1 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) int cpu_to_core_id(int cpu) { struct device_node *np; - const int *reg; + const __be32 *reg; int id = -1; np = of_get_cpu_node(cpu, NULL); @@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) if (!reg) goto out; - id = *reg; + id = be32_to_cpup(reg); out: of_node_put(np); return id; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587..c5d1484 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, slb_v = vcpu->kvm->arch.vrma_slb_v; } + preempt_disable(); /* Find the HPTE in the hash table */ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, HPTE_V_VALID | HPTE_V_ABSENT); - if (index < 0) + if (index < 0) { + preempt_enable(); return -ENOENT; + } hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); v = hptep[0] & ~HPTE_V_HVLOCK; gr = kvm->arch.revmap[index].guest_rpte; @@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu 
*vcpu, gva_t eaddr, /* Unlock the HPTE */ asm volatile("lwsync" : : : "memory"); hptep[0] = v; + preempt_enable(); gpte->eaddr = eaddr; gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); @@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, return -EFAULT; } else { page = pages[0]; + pfn = page_to_pfn(page); if (PageHuge(page)) { page = compound_head(page); pte_size <<= compound_order(page); @@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } rcu_read_unlock_sched(); } - pfn = page_to_pfn(page); } ret = -EFAULT; @@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; } - /* Set the HPTE to point to pfn */ - r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); + /* + * Set the HPTE to point to pfn. + * Since the pfn is at PAGE_SIZE granularity, make sure we + * don't mask out lower-order bits if psize < PAGE_SIZE. + */ + if (psize < PAGE_SIZE) + psize = PAGE_SIZE; + r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); if (hpte_is_writable(r) && !write_ok) r = hpte_make_readonly(r); ret = RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f..b51d5db 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL) { vc->stolen_tb += mftb() - vc->preempt_tb; @@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) vc->preempt_tb = mftb(); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = mftb(); - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) @@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) */ if (vc->vcore_state != VCORE_INACTIVE && vc->runner->arch.run_task != current) { - spin_lock(&vc->runner->arch.tbacct_lock); + spin_lock_irq(&vc->runner->arch.tbacct_lock); p = vc->stolen_tb; if (vc->preempt_tb != TB_NIL) p += now - vc->preempt_tb; - spin_unlock(&vc->runner->arch.tbacct_lock); + spin_unlock_irq(&vc->runner->arch.tbacct_lock); } else { p = vc->stolen_tb; } @@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, core_stolen = vcore_stolen_time(vc, now); stolen = core_stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = core_stolen; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); stolen += vcpu->arch.busy_stolen; vcpu->arch.busy_stolen = 0; - spin_unlock(&vcpu->arch.tbacct_lock); + 
spin_unlock_irq(&vcpu->arch.tbacct_lock); if (!dt || !vpa) return; memset(dt, 0, sizeof(struct dtl_entry)); @@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (list_empty(&vcpu->kvm->arch.rtas_tokens)) return RESUME_HOST; + idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); if (rc == -ENOENT) return RESUME_HOST; @@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); now = mftb(); vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - vcpu->arch.stolen_logged; vcpu->arch.busy_preempt = now; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irq(&vcpu->arch.tbacct_lock); --vc->n_runnable; list_del(&vcpu->arch.run_list); } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c51544..8689e2e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, is_io = pa & (HPTE_R_I | HPTE_R_W); pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); pa &= PAGE_MASK; + pa |= gpa & ~PAGE_MASK; } else { /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); @@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, ptel = hpte_make_readonly(ptel); is_io = hpte_cache_bits(pte_val(pte)); pa = pte_pfn(pte) << PAGE_SHIFT; + pa |= hva & (pte_size - 1); + pa |= gpa & ~PAGE_MASK; } } if (pte_size < psize) return H_PARAMETER; - if (pa && pte_size > psize) - pa |= gpa & (pte_size - 1); ptel &= ~(HPTE_R_PP0 - psize); ptel |= pa; @@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { 20, /* 1M, unsupported */ }; +/* When called from virtmode, this func should be protected by + * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK + * can trigger deadlock issue. + */ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75..be4fa04a 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 13: b machine_check_fwnmi - /* * We come in here when wakened from nap mode on a secondary hw thread. * Relocation is off and most register values are lost. @@ -224,6 +223,11 @@ kvm_start_guest: /* Clear our vcpu pointer so we don't come back in early */ li r0, 0 std r0, HSTATE_KVM_VCPU(r13) + /* + * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing + * the nap_count, because once the increment to nap_count is + * visible we could be given another vcpu. + */ lwsync /* Clear any pending IPI - we're an offline thread */ ld r5, HSTATE_XICS_PHYS(r13) @@ -241,7 +245,6 @@ kvm_start_guest: /* increment the nap count and then go to nap mode */ ld r4, HSTATE_KVM_VCORE(r13) addi r4, r4, VCORE_NAP_COUNT - lwsync /* make previous updates visible */ 51: lwarx r3, 0, r4 addi r3, r3, 1 stwcx. 
r3, 0, r4 @@ -751,15 +754,14 @@ kvmppc_interrupt_hv: * guest CR, R12 saved in shadow VCPU SCRATCH1/0 * guest R13 saved in SPRN_SCRATCH0 */ - /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ - std r9, HSTATE_HOST_R2(r13) + std r9, HSTATE_SCRATCH2(r13) lbz r9, HSTATE_IN_GUEST(r13) cmpwi r9, KVM_GUEST_MODE_HOST_HV beq kvmppc_bad_host_intr #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE cmpwi r9, KVM_GUEST_MODE_GUEST - ld r9, HSTATE_HOST_R2(r13) + ld r9, HSTATE_SCRATCH2(r13) beq kvmppc_interrupt_pr #endif /* We're now back in the host but in guest MMU context */ @@ -779,7 +781,7 @@ kvmppc_interrupt_hv: std r6, VCPU_GPR(R6)(r9) std r7, VCPU_GPR(R7)(r9) std r8, VCPU_GPR(R8)(r9) - ld r0, HSTATE_HOST_R2(r13) + ld r0, HSTATE_SCRATCH2(r13) std r0, VCPU_GPR(R9)(r9) std r10, VCPU_GPR(R10)(r9) std r11, VCPU_GPR(R11)(r9) @@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) */ /* Increment the threads-exiting-guest count in the 0xff00 bits of vcore->entry_exit_count */ - lwsync ld r5,HSTATE_KVM_VCORE(r13) addi r6,r5,VCORE_ENTRY_EXIT 41: lwarx r3,0,r6 addi r0,r3,0x100 stwcx. r0,0,r6 bne 41b - lwsync + isync /* order stwcx. vs. reading napping_threads */ /* * At this point we have an interrupt that we have to pass @@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) sld r0,r0,r4 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ beq 43f + /* Order entry/exit update vs. IPIs */ + sync mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ subf r6,r4,r13 42: andi. r0,r3,1 @@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) bge kvm_cede_exit stwcx. r4,0,r6 bne 31b + /* order napping_threads update vs testing entry_exit_count */ + isync li r0,1 stb r0,HSTATE_NAPPING(r13) - /* order napping_threads update vs testing entry_exit_count */ - lwsync mr r4,r3 lwz r7,VCORE_ENTRY_EXIT(r5) cmpwi r7,0x100 diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041..f779450 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -129,29 +129,32 @@ kvm_start_lightweight: * R12 = exit handler id * R13 = PACA * SVCPU.* = guest * + * MSR.EE = 1 * */ + PPC_LL r3, GPR4(r1) /* vcpu pointer */ + + /* + * kvmppc_copy_from_svcpu can clobber volatile registers, save + * the exit handler id to the vcpu and restore it from there later. + */ + stw r12, VCPU_TRAP(r3) + /* Transfer reg values from shadow vcpu back to vcpu struct */ /* On 64-bit, interrupts are still off at this point */ - PPC_LL r3, GPR4(r1) /* vcpu pointer */ + GET_SHADOW_VCPU(r4) bl FUNC(kvmppc_copy_from_svcpu) nop #ifdef CONFIG_PPC_BOOK3S_64 - /* Re-enable interrupts */ - ld r3, HSTATE_HOST_MSR(r13) - ori r3, r3, MSR_EE - MTMSR_EERI(r3) - /* * Reload kernel SPRG3 value. * No need to save guest value as usermode can't modify SPRG3. 
*/ ld r3, PACA_SPRG3(r13) mtspr SPRN_SPRG3, r3 - #endif /* CONFIG_PPC_BOOK3S_64 */ /* R7 = vcpu */ @@ -177,7 +180,7 @@ kvm_start_lightweight: PPC_STL r31, VCPU_GPR(R31)(r7) /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ - mr r5, r12 + lwz r5, VCPU_TRAP(r7) /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3..5b9e906 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; + svcpu->in_use = 0; svcpu_put(svcpu); #endif vcpu->cpu = smp_processor_id(); @@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + if (svcpu->in_use) { + kvmppc_copy_from_svcpu(vcpu, svcpu); + } memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); @@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, svcpu->ctr = vcpu->arch.ctr; svcpu->lr = vcpu->arch.lr; svcpu->pc = vcpu->arch.pc; + svcpu->in_use = true; } /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, struct kvmppc_book3s_shadow_vcpu *svcpu) { + /* + * vcpu_put would just call us again because in_use hasn't + * been updated yet. + */ + preempt_disable(); + + /* + * Maybe we were already preempted and synced the svcpu from + * our preempt notifiers. Don't bother touching this svcpu then. + */ + if (!svcpu->in_use) + goto out; + vcpu->arch.gpr[0] = svcpu->gpr[0]; vcpu->arch.gpr[1] = svcpu->gpr[1]; vcpu->arch.gpr[2] = svcpu->gpr[2]; @@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; vcpu->arch.last_inst = svcpu->last_inst; + svcpu->in_use = false; + +out: + preempt_enable(); } static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9..c3c5231 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) li r6, MSR_IR | MSR_DR andc r6, r5, r6 /* Clear DR and IR in MSR value */ -#ifdef CONFIG_PPC_BOOK3S_32 /* * Set EE in HOST_MSR so that it's enabled when we get into our - * C exit handler function. On 64-bit we delay enabling - * interrupts until we have finished transferring stuff - * to or from the PACA. + * C exit handler function. 
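Back in book3s_pr.c above, the new in_use flag makes the shadow-vcpu copy-back idempotent. The general shape of that handshake, with made-up type names and a trimmed field set purely for illustration, is roughly:

    #include <linux/preempt.h>
    #include <linux/types.h>

    struct shadow_state { bool in_use; unsigned long gpr[14]; };
    struct real_state   { unsigned long gpr[14]; };

    /* Illustrative only: a lazy copy-back guarded by a flag and run
     * with preemption off, so a preempt notifier that already synced
     * the state cannot race a second, stale copy. */
    static void lazy_sync(struct real_state *r, struct shadow_state *s)
    {
            int i;

            preempt_disable();
            if (s->in_use) {
                    for (i = 0; i < 14; i++)
                            r->gpr[i] = s->gpr[i];
                    s->in_use = false;
            }
            preempt_enable();
    }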
*/ ori r5, r5, MSR_EE -#endif mtsrr0 r7 mtsrr1 r6 RFI diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a2..0591e05 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; - struct thread_struct thread; + struct debug_reg debug; #ifdef CONFIG_PPC_FPU struct thread_fp_state fp; int fpexc_mode; @@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif /* Switch to guest debug context */ - thread.debug = vcpu->arch.shadow_dbg_reg; - switch_booke_debug_regs(&thread); - thread.debug = current->thread.debug; + debug = vcpu->arch.shadow_dbg_reg; + switch_booke_debug_regs(&debug); + debug = current->thread.debug; current->thread.debug = vcpu->arch.shadow_dbg_reg; kvmppc_fix_ee_before_entry(); @@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) We also get here with interrupts enabled. */ /* Switch back to user space debug context */ - switch_booke_debug_regs(&thread); - current->thread.debug = thread.debug; + switch_booke_debug_regs(&debug); + current->thread.debug = debug; #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4..79d83ca 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; static u8 opal_lpc_inb(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xffff) return 0xff; rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); - return rc ? 0xff : data; + return rc ? 0xff : be32_to_cpu(data); } static __le16 __opal_lpc_inw(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffe) return 0xffff; if (port & 1) return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); - return rc ? 0xffff : data; + return rc ? 0xffff : be32_to_cpu(data); } static u16 opal_lpc_inw(unsigned long port) { @@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) static __le32 __opal_lpc_inl(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffc) return 0xffffffff; @@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) (__le32)opal_lpc_inb(port + 2) << 8 | opal_lpc_inb(port + 3); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); - return rc ? 0xffffffff : data; + return rc ? 
0xffffffff : be32_to_cpu(data); } static u32 opal_lpc_inl(unsigned long port) diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8f..4fbf276 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) { struct opal_scom_map *m = map; int64_t rc; + __be64 v; reg = opal_scom_unmangle(reg); - rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); + rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); + *value = be64_to_cpu(v); return opal_xscom_err_xlate(rc); } diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007..c9fecf0 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) { struct hvcall_ppp_data ppp_data; struct device_node *root; - const int *perf_level; + const __be32 *perf_level; int rc; rc = h_get_ppp(&ppp_data); @@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) perf_level = of_get_property(root, "ibm,partition-performance-parameters-level", NULL); - if (perf_level && (*perf_level >= 1)) { + if (perf_level && (be32_to_cpup(perf_level) >= 1)) { seq_printf(m, "physical_procs_allocated_to_virtualization=%d\n", ppp_data.phys_platform_procs); @@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) int partition_potential_processors; int partition_active_processors; struct device_node *rtas_node; - const int *lrdrp = NULL; + const __be32 *lrdrp = NULL; rtas_node = of_find_node_by_path("/rtas"); if (rtas_node) @@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; } else { - partition_potential_processors = *(lrdrp + 4); + partition_potential_processors = be32_to_cpup(lrdrp + 4); } of_node_put(rtas_node); @@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *model = ""; const char *system_id = ""; const char *tmp; - const unsigned int *lp_index_ptr; + const __be32 *lp_index_ptr; unsigned int lp_index = 0; seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); @@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); if (lp_index_ptr) - lp_index = *lp_index_ptr; + lp_index = be32_to_cpup(lp_index_ptr); of_node_put(rootdn); } seq_printf(m, "serial_number=%s\n", system_id); diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0ab..0c882e8 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) { struct device_node *dn; struct pci_dn *pdn; - const u32 *req_msi; + const __be32 *p; + u32 req_msi; pdn = pci_get_pdn(pdev); if (!pdn) @@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) dn = pdn->node; - req_msi = of_get_property(dn, prop_name, NULL); - if (!req_msi) { + p = of_get_property(dn, prop_name, NULL); + if (!p) { pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); return -ENOENT; } - if (*req_msi < nvec) { + req_msi = be32_to_cpup(p); + if (req_msi < nvec) { pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); - if 
(*req_msi == 0) /* Be paranoid */ + if (req_msi == 0) /* Be paranoid */ return -ENOSPC; - return *req_msi; + return req_msi; } return 0; @@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) { struct device_node *dn; - const u32 *p; + const __be32 *p; dn = of_node_get(pci_device_to_OF_node(dev)); while (dn) { @@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) if (p) { pr_debug("rtas_msi: found prop on dn %s\n", dn->full_name); - *total = *p; + *total = be32_to_cpup(p); return dn; } @@ -232,13 +234,13 @@ struct msi_counts { static void *count_non_bridge_devices(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; u32 class; pr_debug("rtas_msi: counting %s\n", dn->full_name); p = of_get_property(dn, "class-code", NULL); - class = p ? *p : 0; + class = p ? be32_to_cpup(p) : 0; if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) counts->num_devices++; @@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) static void *count_spare_msis(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; int req; if (dn == counts->requestor) @@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) req = 0; p = of_get_property(dn, "ibm,req#msi", NULL); if (p) - req = *p; + req = be32_to_cpup(p); p = of_get_property(dn, "ibm,req#msi-x", NULL); if (p) - req = max(req, (int)*p); + req = max(req, (int)be32_to_cpup(p)); } if (req < counts->quota) diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58..d7096f2 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ static DEFINE_SPINLOCK(nvram_lock); struct err_log_info { - int error_type; - unsigned int seq_num; + __be32 error_type; + __be32 seq_num; }; struct nvram_os_partition { @@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { }; struct oops_log_info { - u16 version; - u16 report_length; - u64 timestamp; + __be16 version; + __be16 report_length; + __be64 timestamp; } __attribute__((packed)); static void oops_to_nvram(struct kmsg_dumper *dumper, @@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, length = part->size; } - info.error_type = err_type; - info.seq_num = error_log_cnt; + info.error_type = cpu_to_be32(err_type); + info.seq_num = cpu_to_be32(error_log_cnt); tmp_index = part->index; @@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, } if (part->os_partition) { - *error_log_cnt = info.seq_num; - *err_type = info.error_type; + *error_log_cnt = be32_to_cpu(info.seq_num); + *err_type = be32_to_cpu(info.error_type); } return 0; @@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) pr_err("nvram: logging uncompressed oops/panic report\n"); return -1; } - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) zipped_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(zipped_len); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); return 0; } @@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, clobbering_unread_rtas_event()) return -1; - 
oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) size; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(size); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); if (compressed) err_type = ERR_TYPE_KERNEL_PANIC_GZ; @@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, size_t length, hdr_size; oops_hdr = (struct oops_log_info *)buff; - if (oops_hdr->version < OOPS_HDR_VERSION) { + if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { /* Old format oops header had 2-byte record size */ hdr_size = sizeof(u16); - length = oops_hdr->version; + length = be16_to_cpu(oops_hdr->version); time->tv_sec = 0; time->tv_nsec = 0; } else { hdr_size = sizeof(*oops_hdr); - length = oops_hdr->report_length; - time->tv_sec = oops_hdr->timestamp; + length = be16_to_cpu(oops_hdr->report_length); + time->tv_sec = be64_to_cpu(oops_hdr->timestamp); time->tv_nsec = 0; } *buf = kmalloc(length, GFP_KERNEL); @@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, kmsg_dump_get_buffer(dumper, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) text_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(text_len); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); } (void) nvram_write_os_partition(&oops_log_partition, oops_buf, - (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, + (int) (sizeof(*oops_hdr) + text_len), err_type, ++oops_count); spin_unlock_irqrestore(&lock, flags); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856..70670a2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; - const uint32_t *pcie_link_speed_stats; + const __be32 *pcie_link_speed_stats; bus = bridge->bus; @@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { - pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, + pcie_link_speed_stats = of_get_property(pdn, "ibm,pcie-link-speed-stats", NULL); if (pcie_link_speed_stats) break; @@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; } - switch (pcie_link_speed_stats[0]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; @@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) break; } - switch (pcie_link_speed_stats[1]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29..3baff31 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ checksum.o strlen.o div64.o div64-generic.o # Extracted from libgcc -lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ +obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ udiv_qrnnd.o diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc1..0f9e945 100644 --- 
a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) } #define pte_accessible pte_accessible -static inline unsigned long pte_accessible(pte_t a) +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { return pte_val(a) & _PAGE_VALID; } @@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U * and SUN4V pte layout, so this inline test is fine. */ - if (likely(mm != &init_mm) && pte_accessible(orig)) + if (likely(mm != &init_mm) && pte_accessible(mm, orig)) tlb_batch_add(mm, addr, ptep, orig, fullmm); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71..0952ecd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -26,6 +26,7 @@ config X86 select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_INT128 if X86_64 select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_IDE select HAVE_OPROFILE diff --git a/arch/x86/Makefile b/arch/x86/Makefile index eda00f9..57d0215 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y) KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return - # Don't autogenerate SSE instructions - KBUILD_CFLAGS += -mno-sse + # Don't autogenerate MMX or SSE instructions + KBUILD_CFLAGS += -mno-mmx -mno-sse # Never want PIC in a 32-bit kernel, prevent breakage with GCC built # with nonstandard options @@ -60,8 +60,8 @@ else KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 - # Don't autogenerate SSE instructions - KBUILD_CFLAGS += -mno-sse + # Don't autogenerate MMX or SSE instructions + KBUILD_CFLAGS += -mno-mmx -mno-sse # Use -mpreferred-stack-boundary=3 if supported. KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index dce69a2..d9c1195 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE # How to compile the 16-bit code. Note we always compile for -march=i386, # that way we can complain to the user if the CPU is insufficient. 
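The ARCH_SUPPORTS_INT128 select added to the x86 Kconfig above advertises that GCC's unsigned __int128 works for x86_64 builds. A tiny illustrative helper of what that allows (the name is made up and does not claim to match an existing kernel function):

    #include <linux/types.h>

    /* Illustrative only: with __int128 available, the high half of a
     * 64x64-bit multiply can be written directly instead of by hand. */
    static inline u64 mul64_hi_sketch(u64 a, u64 b)
    {
            return (u64)(((unsigned __int128)a * b) >> 64);
    }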
-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ +KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \ -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/code16gcc.h \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ - $(call cc-option, -fno-unit-at-a-time)) \ + $(call cc-option, -fno-unit-at-a-time)) \ $(call cc-option, -fno-stack-protector) \ $(call cc-option, -mpreferred-stack-boundary=2) -KBUILD_CFLAGS += $(call cc-option, -m32) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index dcd90df..c8a6792 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small KBUILD_CFLAGS += $(cflags-y) +KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d19994..bbc8b12 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) } #define pte_accessible pte_accessible -static inline int pte_accessible(pte_t a) +static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { - return pte_flags(a) & _PAGE_PRESENT; + if (pte_flags(a) & _PAGE_PRESENT) + return true; + + if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && + mm_tlb_flush_pending(mm)) + return true; + + return false; } static inline int pte_hidden(pte_t pte) diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723..c8b0519 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -8,6 +8,12 @@ DECLARE_PER_CPU(int, __preempt_count); /* + * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such + * that a decrement hitting 0 means we can and should reschedule. + */ +#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) + +/* * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users * that think a non-zero value indicates we cannot preempt. */ @@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) __this_cpu_add_4(__preempt_count, -val); } +/* + * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ static __always_inline bool __preempt_count_dec_and_test(void) { GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb2..c1a8618 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -262,11 +262,20 @@ struct cpu_hw_events { __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) -#define EVENT_CONSTRAINT_END \ - EVENT_CONSTRAINT(0, 0, 0) +/* + * We define the end marker as having a weight of -1 + * to enable blacklisting of events using a counter bitmask + * of zero and thus a weight of zero. + * The end marker has a weight that cannot possibly be + * obtained from counting the bits in the bitmask. 
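The marker defined just below works because no real constraint can have weight -1, while weight 0 is now legitimate for an intentionally blacklisted event whose counter bitmask is empty. A stand-alone illustration of why the old weight-as-truth-value loop test misses such entries:

    /* Illustrative only: a blacklisted entry has an empty counter mask
     * (weight 0), so the array must end on a weight no entry can have. */
    struct constraint { unsigned long idxmsk; int weight; };

    static const struct constraint table[] = {
            { .idxmsk = 0x3, .weight = 2 },  /* may use counters 0-1 */
            { .idxmsk = 0x0, .weight = 0 },  /* blacklisted event    */
            { .weight = -1 },                /* end marker           */
    };

    static int count_entries(void)
    {
            const struct constraint *c;
            int n = 0;

            for (c = table; c->weight != -1; c++)  /* old test: c->weight */
                    n++;

            return n;  /* 2 with the new test, 1 with the old one */
    }

With the old test the walk stops at the weight-0 entry, so a blacklist constraint placed before the end marker would never be reached.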
+ */ +#define EVENT_CONSTRAINT_END { .weight = -1 } +/* + * Check for end marker with weight == -1 + */ #define for_each_event_constraint(e, c) \ - for ((e) = (c); (e)->weight; (e)++) + for ((e) = (c); (e)->weight != -1; (e)++) /* * Extra registers for specific events. diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5439117..dec48bf 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic) return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; } +#define KVM_X2APIC_CID_BITS 0 + static void recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; @@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm) if (apic_x2apic_mode(apic)) { new->ldr_bits = 32; new->cid_shift = 16; - new->cid_mask = new->lid_mask = 0xffff; + new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1; + new->lid_mask = 0xffff; } else if (kvm_apic_sw_enabled(apic) && !new->cid_mask /* flat mode */ && kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { @@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || + apic->lapic_timer.period == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); @@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; - void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); @@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); - data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); - kunmap_atomic(vapic); + kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32)); apic_set_tpr(vcpu->arch.apic, data & 0xff); } @@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; - void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); @@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); - *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; - kunmap_atomic(vapic); + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32)); } -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) { - vcpu->arch.apic->vapic_addr = vapic_addr; - if (vapic_addr) + if (vapic_addr) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.apic->vapic_cache, + vapic_addr, sizeof(u32))) + return -EINVAL; __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); - else + } else { __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); + } + + vcpu->arch.apic->vapic_addr = vapic_addr; + return 0; } int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index c730ac9..c8b0d0d 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -34,7 +34,7 @@ struct kvm_lapic { */ void *regs; gpa_t 
vapic_addr; - struct page *vapic_page; + struct gfn_to_hva_cache vapic_cache; unsigned long pending_events; unsigned int sipi_vector; }; @@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 21ef1ba..5d004da 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; } diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46..0596e8e 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_t pte = gup_get_pte(ptep); struct page *page; + /* Similar to the PMD case, NUMA hinting must take slow path */ + if (pte_numa(pte)) { + pte_unmap(ptep); + return 0; + } + if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { pte_unmap(ptep); return 0; @@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. 
+ */ + if (pmd_numa(pmd)) + return 0; if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) return 0; } else { diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 92c0234..cceb813 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -690,13 +690,6 @@ void __init efi_init(void) set_bit(EFI_MEMMAP, &x86_efi_facility); -#ifdef CONFIG_X86_32 - if (efi_is_native()) { - x86_platform.get_wallclock = efi_get_time; - x86_platform.set_wallclock = efi_set_rtc_mmss; - } -#endif - #if EFI_DEBUG print_efi_memmap(); #endif diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 0f92173..efe4d72 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, unsigned long status; bcp = &per_cpu(bau_control, cpu); - stat = bcp->statp; - stat->s_enters++; if (bcp->nobau) return cpumask; + stat = bcp->statp; + stat->s_enters++; + if (bcp->busy) { descriptor_status = read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 8869287..9cac825 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/../../boot/code16gcc.h \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ - $(call cc-option, -fno-unit-at-a-time)) \ + $(call cc-option, -fno-unit-at-a-time)) \ $(call cc-option, -fno-stack-protector) \ $(call cc-option, -mpreferred-stack-boundary=2) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 26311f2..cb1d557 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count, static struct pstore_info erst_info = { .owner = THIS_MODULE, .name = "erst", + .flags = PSTORE_FLAGS_FRAGILE, .open = erst_open_pstore, .close = erst_close_pstore, .read = erst_reader, diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 98745dd..81f9775 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context, BUG_ON(reg_size != 4); - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); if (ret < 0) return ret; @@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context, offset += ctx->val_bytes; } - if (ctx->clk) + if (!IS_ERR(ctx->clk)) clk_disable(ctx->clk); return 0; @@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context, BUG_ON(reg_size != 4); - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); if (ret < 0) return ret; @@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context, offset += ctx->val_bytes; } - if (ctx->clk) + if (!IS_ERR(ctx->clk)) clk_disable(ctx->clk); return 0; @@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context) { struct regmap_mmio_context *ctx = context; - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { clk_unprepare(ctx->clk); clk_put(ctx->clk); } @@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, ctx->regs = regs; ctx->val_bytes = config->val_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); 
if (clk_id == NULL) return ctx; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 9c021d9..c2e0021 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, val + (i * val_bytes), val_bytes); if (ret != 0) - return ret; + goto out; } } else { ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); @@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, /** * regmap_read(): Read a value from a single register * - * @map: Register map to write to + * @map: Register map to read from * @reg: Register to be read from * @val: Pointer to store read value * @@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read); /** * regmap_raw_read(): Read raw data from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value * @val_len: Size of data to read @@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read); /** * regmap_bulk_read(): Read multiple registers from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value, in native register size for device * @val_count: Number of registers to read diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ea192ec..f370fc1 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -495,23 +495,23 @@ static int null_add_dev(void) spin_lock_init(&nullb->lock); + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + submit_queues = nr_online_nodes; + if (setup_queues(nullb)) goto err; if (queue_mode == NULL_Q_MQ) { null_mq_reg.numa_node = home_node; null_mq_reg.queue_depth = hw_queue_depth; + null_mq_reg.nr_hw_queues = submit_queues; if (use_per_node_hctx) { null_mq_reg.ops->alloc_hctx = null_alloc_hctx; null_mq_reg.ops->free_hctx = null_free_hctx; - - null_mq_reg.nr_hw_queues = nr_online_nodes; } else { null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - - null_mq_reg.nr_hw_queues = submit_queues; } nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 7be41e6..00a3abe 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw) struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap, + ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, s2mps11->mask, s2mps11->mask); if (!ret) @@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw) struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL, + ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, s2mps11->mask, ~s2mps11->mask); if (!ret) @@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) s2mps11_clk->hw.init = &s2mps11_clks_init[i]; s2mps11_clk->mask = 1 << i; - ret = regmap_read(s2mps11_clk->iodev->regmap, + ret = regmap_read(s2mps11_clk->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, &val); if (ret < 0) goto err_reg; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5c07a56..634c4d6 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ 
-75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK config CLKSRC_EFM32 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) + select CLKSRC_MMIO default ARCH_EFM32 help Support to use the timers of EFM32 SoCs as clock source and clock diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 35639cf4..b9ddd9e 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -35,6 +35,5 @@ void __init clocksource_of_init(void) init_func = match->data; init_func(np); - of_node_put(np); } } diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 45ba8ae..2a2ea27 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer) static u64 read_sched_clock(void) { - return __raw_readl(sched_io_base); + return ~__raw_readl(sched_io_base); } static const struct of_device_id sptimer_ids[] __initconst = { { .compatible = "picochip,pc3x2-rtc" }, - { .compatible = "snps,dw-apb-timer-sp" }, { /* Sentinel */ }, }; @@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer) num_called++; } CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); -CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 2fb4695..a4f6119 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node) writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), timer_base + TIMER_CTL_REG(0)); + /* Make sure timer is stopped before playing with interrupts */ + sun4i_clkevt_time_stop(0); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index d8e47e5..4e7f680 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; /* - * Set scale and timer for sched_clock. - */ - sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); - - /* * Setup free-running clocksource timer (interrupts * disabled). */ @@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); + /* + * Set scale and timer for sched_clock. 
+ */ + sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); + clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, "armada_370_xp_clocksource", timer_clk, 300, 32, clocksource_mmio_readl_down); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687c..c823daa 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -62,6 +62,7 @@ config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86 select DMA_ENGINE + select DMA_ENGINE_RAID select DCA help Enable support for the Intel(R) I/OAT DMA engine present @@ -112,6 +113,7 @@ config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION select DMA_ENGINE + select DMA_ENGINE_RAID select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- Enable support for the Marvell XOR engine. @@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA tristate "AMCC PPC440SPe ADMA support" depends on 440SPe || 440SP select DMA_ENGINE + select DMA_ENGINE_RAID select ARCH_HAS_ASYNC_TX_FIND_CHANNEL select ASYNC_TX_ENABLE_CHANNEL_SWITCH help @@ -352,6 +355,7 @@ config NET_DMA bool "Network: TCP receive copy offload" depends on DMA_ENGINE && NET default (INTEL_IOATDMA || FSL_DMA) + depends on BROKEN help This enables the use of DMA engines in the network stack to offload receive copy-to-user operations, freeing CPU cycles. @@ -377,4 +381,7 @@ config DMATEST Simple DMA test client. Say N unless you're debugging a DMA Device driver. +config DMA_ENGINE_RAID + bool + endif diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 16a2aa2..ec4ee5c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd) struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); - dma_descriptor_unmap(txd); + dma_descriptor_unmap(&vd->tx); if (!txd->done) pl08x_release_mux(plchan); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index f31d647..2787aba 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} #if defined(VERBOSE_DEBUG) static void vdbg_dump_regs(struct at_dma_chan *atchan) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ea806bd..ef63b90 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -912,7 +912,7 @@ struct dmaengine_unmap_pool { #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } static struct dmaengine_unmap_pool unmap_pool[] = { __UNMAP_POOL(2), - #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) __UNMAP_POOL(16), __UNMAP_POOL(128), __UNMAP_POOL(256), @@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, dma_cookie_t cookie; unsigned long flags; - unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); if (!unmap) return -ENOMEM; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3a..9dfcaf5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -539,9 +539,9 @@ static int dmatest_func(void *data) um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - unsigned long buf = (unsigned long) thread->srcs[i]; + void *buf = thread->srcs[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + 
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE); @@ -559,9 +559,9 @@ static int dmatest_func(void *data) /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - unsigned long buf = (unsigned long) thread->dsts[i]; + void *buf = thread->dsts[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_BIDIRECTIONAL); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7086a16..f157c6f 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan, hw->count = CPU_TO_DMA(chan, count, 32); } -static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) -{ - return DMA_TO_CPU(chan, desc->hw.count, 32); -} - static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { @@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan, hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } -static dma_addr_t get_desc_src(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; -} - static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { @@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan, hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } -static dma_addr_t get_desc_dst(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? 
((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; -} - static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { @@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; - dma_cookie_t cookie; + dma_cookie_t cookie = -EINVAL; spin_lock_irqsave(&chan->desc_lock, flags); @@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct dma_async_tx_descriptor *txd = &desc->async_tx; - struct device *dev = chan->common.device->dev; - dma_addr_t src = get_desc_src(chan, desc); - dma_addr_t dst = get_desc_dst(chan, desc); - u32 len = get_desc_cnt(chan, desc); /* Run the link descriptor callback function */ if (txd->callback) { diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index dcb1e05..8869500 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op) } } + platform_set_drvdata(op, pdev); dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); return 0; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7807f0e..53fb0c8 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) hw_desc->desc_command = (1 << 31); } -static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_dest_addr; -} - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) /* * Perform a transaction to verify the HW works. 
*/ -#define MV_XOR_TEST_SIZE 2000 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) { @@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; int err = 0; - src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ - for (i = 0; i < MV_XOR_TEST_SIZE; i++) + for (i = 0; i < PAGE_SIZE; i++) ((u8 *) src)[i] = (u8)i; dma_chan = &mv_chan->dmachan; @@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) goto out; } - dest_dma = dma_map_single(dma_chan->device->dev, dest, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto free_resources; + } + + src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, + PAGE_SIZE, DMA_TO_DEVICE); + unmap->to_cnt = 1; + unmap->addr[0] = src_dma; - src_dma = dma_map_single(dma_chan->device->dev, src, - MV_XOR_TEST_SIZE, DMA_TO_DEVICE); + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, + PAGE_SIZE, DMA_FROM_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dest_dma; + + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, - MV_XOR_TEST_SIZE, 0); + PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); @@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); - if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { + PAGE_SIZE, DMA_FROM_DEVICE); + if (memcmp(src, dest, PAGE_SIZE)) { dev_err(dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; @@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: kfree(src); @@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; + int src_count = MV_XOR_NUM_SRC_TEST; - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) @@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } /* Fill in src buffers */ - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) + for (src_idx = 0; src_idx < src_count; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | @@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) goto out; } + unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, + GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto 
free_resources; + } + /* test xor */ - dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, - DMA_FROM_DEVICE); + for (i = 0; i < src_count; i++) { + unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], + 0, PAGE_SIZE, DMA_TO_DEVICE); + dma_srcs[i] = unmap->addr[i]; + unmap->to_cnt++; + } - for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], - 0, PAGE_SIZE, DMA_TO_DEVICE); + unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + dest_dma = unmap->addr[src_count]; + unmap->from_cnt = 1; + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); + src_count, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); @@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: - src_idx = MV_XOR_NUM_SRC_TEST; + src_idx = src_count; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); @@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev) int i = 0; for_each_child_of_node(pdev->dev.of_node, np) { + struct mv_xor_chan *chan; dma_cap_mask_t cap_mask; int irq; @@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); - xordev->channels[i] = NULL; + chan = mv_xor_channel_add(xordev, pdev, i, + cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); irq_dispose_mapping(irq); goto err_channel_add; } + xordev->channels[i] = chan; i++; } } else if (pdata && pdata->channels) { for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { struct mv_xor_channel_data *cd; + struct mv_xor_chan *chan; int irq; cd = &pdata->channels[i]; @@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cd->cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); + chan = mv_xor_channel_add(xordev, pdev, i, + cd->cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); goto err_channel_add; } + + xordev->channels[i] = chan; } } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483..536632f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { - desc->pchan = NULL; desc->req.x = &desc->px; desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; - desc->rqcfg.privileged = 0; - desc->rqcfg.insnaccess = 0; desc->rqcfg.scctl = SCCTRL0; desc->rqcfg.dcctl = DCCTRL0; desc->req.cfg = &desc->rqcfg; @@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) if (!pdmac) return 0; - desc = kmalloc(count * sizeof(*desc), flg); + desc = kcalloc(count, sizeof(*desc), flg); if (!desc) return 0; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6..8bba298 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, } /** - * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation - */ -static void 
ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, - int value, unsigned long flags) -{ - struct dma_cdb *hw_desc = desc->hw_desc; - - memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); - desc->hw_next = NULL; - desc->src_cnt = 1; - desc->dst_cnt = 1; - - if (flags & DMA_PREP_INTERRUPT) - set_bit(PPC440SPE_DESC_INT, &desc->flags); - else - clear_bit(PPC440SPE_DESC_INT, &desc->flags); - - hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); - hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); - hw_desc->opc = DMA_CDB_OPC_DFILL128; -} - -/** * ppc440spe_desc_set_src_addr - set source address into the descriptor */ static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, @@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( struct ppc440spe_adma_chan *chan, dma_cookie_t cookie) { - int i; - BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; @@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) ppc440spe_adma_prep_dma_interrupt; } pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " - "( %s%s%s%s%s%s%s)\n", + "( %s%s%s%s%s%s)\n", dev_name(adev->dev), dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 4cb1279..4eddedb 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -628,42 +628,13 @@ retry: s3cchan->state = S3C24XX_DMA_CHAN_IDLE; } -static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct s3c24xx_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) { struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); if (!s3cchan->slave) - s3c24xx_dma_unmap_buffers(txd); + dma_descriptor_unmap(&vd->tx); s3c24xx_dma_free_txd(txd); } @@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&s3cchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index ebad845..3083d90 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -60,6 +60,7 @@ #define HPB_DMAE_DSTPR_DMSTP BIT(0) /* DMA status register (DSTSR) bits */ +#define HPB_DMAE_DSTSR_DQSTS BIT(2) #define HPB_DMAE_DSTSR_DMSTS BIT(0) /* DMA common registers */ @@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan) ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); 
ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); + + chan->plane_idx = 0; + chan->first_desc = true; } static const struct hpb_dmae_slave_config * @@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan) struct hpb_dmae_chan *chan = to_chan(schan); u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); - return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; + if (chan->xfer_mode == XFER_DOUBLE) + return dstsr & HPB_DMAE_DSTSR_DQSTS; + else + return dstsr & HPB_DMAE_DSTSR_DMSTS; } static int @@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) } schan = &new_hpb_chan->shdma_chan; + schan->max_xfer_len = HPB_DMA_TCR_MAX; + shdma_chan_probe(sdev, schan, id); if (pdev->id >= 0) diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index bae6c29..17686ca 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; - struct txx9dmac_slave *ds = dc->chan.private; dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", txd->cookie, desc); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 8472405..d7f1b57 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, u32 tad_offset; u32 rir_way; u32 mb, kb; - u64 ch_addr, offset, limit, prv = 0; + u64 ch_addr, offset, limit = 0, prv = 0; /* diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index b0bb056..281029d 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = { .cmd_per_lun = 1, .can_queue = 1, .sdev_attrs = sbp2_scsi_sysfs_attrs, - .no_write_same = 1, }; MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 743fd42..4b9dc83 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, static struct pstore_info efi_pstore_info = { .owner = THIS_MODULE, .name = "efi", + .flags = PSTORE_FLAGS_FRAGILE, .open = efi_pstore_open, .close = efi_pstore_close, .read = efi_pstore_read, diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 8847adf..84be701 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) * NOTE: we assume for now that only irqs in the first gpio_chip * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). 
*/ - if (offset < d->irq_base) + if (offset < d->gpio_unbanked) return d->gpio_irq + offset; else return -ENODEV; @@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) /* pass "bank 0" GPIO IRQs to AINTC */ chips[0].chip.to_irq = gpio_to_irq_unbanked; + chips[0].gpio_irq = bank_irq; + chips[0].gpio_unbanked = pdata->gpio_unbanked; binten = BIT(0); /* AINTC handles mask/unmask; GPIO handles triggering */ diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index 7b37300..2baf0dd 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) spin_lock_irqsave(&tlmm_lock, irq_flags); writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); - clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); __clear_bit(gpio, msm_gpio.enabled_irqs); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } @@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) spin_lock_irqsave(&tlmm_lock, irq_flags); __set_bit(gpio, msm_gpio.enabled_irqs); - set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index fe088a3..8b7e719 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) u32 pending; unsigned int offset, irqs_handled = 0; - while ((pending = gpio_rcar_read(p, INTDT))) { + while ((pending = gpio_rcar_read(p, INTDT) & + gpio_rcar_read(p, INTMSK))) { offset = __ffs(pending); gpio_rcar_write(p, INTCLR, BIT(offset)); generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index b97d6a6..f999689 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset) if (offset < TWL4030_GPIO_MAX) ret = twl4030_set_gpio_direction(offset, 1); else - ret = -EINVAL; + ret = -EINVAL; /* LED outputs can't be set as input */ if (!ret) priv->direction &= ~BIT(offset); @@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); - int ret = -EINVAL; + int ret = 0; mutex_lock(&priv->mutex); - if (offset < TWL4030_GPIO_MAX) + if (offset < TWL4030_GPIO_MAX) { ret = twl4030_set_gpio_direction(offset, 0); + if (ret) { + mutex_unlock(&priv->mutex); + return ret; + } + } + + /* + * LED gpios i.e. 
offset >= TWL4030_GPIO_MAX are always output + */ priv->direction |= BIT(offset); mutex_unlock(&priv->mutex); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0a1e4a5..8835dcd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -68,6 +68,8 @@ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) /* Force reduced-blanking timings for detailed modes */ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) +/* Force 8bpc */ +#define EDID_QUIRK_FORCE_8BPC (1 << 8) struct detailed_mode_closure { struct drm_connector *connector; @@ -128,6 +130,9 @@ static struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, }; /* @@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) drm_add_display_info(edid, &connector->display_info); + if (quirks & EDID_QUIRK_FORCE_8BPC) + connector->display_info.bpc = 8; + return num_modes; } EXPORT_SYMBOL(drm_add_edid_modes); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index f53d524..66dd3a0 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -566,11 +566,11 @@ err_unload: if (dev->driver->unload) dev->driver->unload(dev); err_primary_node: - drm_put_minor(dev->primary); + drm_unplug_minor(dev->primary); err_render_node: - drm_put_minor(dev->render); + drm_unplug_minor(dev->render); err_control_node: - drm_put_minor(dev->control); + drm_unplug_minor(dev->control); err_agp: if (dev->driver->bus->agp_destroy) dev->driver->bus->agp_destroy(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7a3759f..98a22e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) if (nouveau_runtime_pm == 0) return -EINVAL; + /* are we optimus enabled? */ + if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { + DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); + return -EINVAL; + } + nv_debug_level(SILENT); drm_kms_helper_poll_disable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 037d324..66ac0ff 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -8,5 +8,6 @@ config DRM_QXL select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_TTM + select CRC32 help QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. 
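
The "select CRC32" added to DRM_QXL above is presumably there because the qxl display code uses the kernel's generic CRC32 helpers (note the <linux/crc32.h> include corrected in the next hunk); without the select, a kernel built with CRC32 otherwise disabled can fail to link the driver in. A minimal sketch of the pattern, assuming only the stock crc32() wrapper from lib/crc32 (the function name below is illustrative, not the driver's actual code):

    #include <linux/crc32.h>

    /*
     * Checksum a config blob the way such drivers typically do:
     * seed with ~0 and run crc32() (an alias for crc32_le()) over it.
     */
    static u32 example_config_crc(const void *data, size_t len)
    {
            return crc32(~0U, data, len);
    }

In Kconfig terms, any driver calling crc32()/crc32_le() wants either "select CRC32" or "depends on CRC32" so that lib/crc32 is built into the image, which is exactly what the hunk above adds.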
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5e827c2..d70aafb 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -24,7 +24,7 @@ */ -#include "linux/crc32.h" +#include <linux/crc32.h> #include "qxl_drv.h" #include "qxl_object.h" diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 80a2012..b197059 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, } else if ((rdev->family == CHIP_TAHITI) || (rdev->family == CHIP_PITCAIRN)) fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); - else if (rdev->family == CHIP_VERDE) + else if ((rdev->family == CHIP_VERDE) || + (rdev->family == CHIP_OLAND) || + (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); switch (radeon_crtc->crtc_id) { diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 0300727..d08b83c 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev, radeon_ring_write(ring, 0); /* src/dst endian swap */ radeon_ring_write(ring, src_offset & 0xffffffff); radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); - radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, dst_offset & 0xffffffff); radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index de86493..713a5d3 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; } @@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } @@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev) rdev->audio.enabled = true; if (ASIC_IS_DCE8(rdev)) - rdev->audio.num_pins = 7; + rdev->audio.num_pins = 6; + else if (ASIC_IS_DCE61(rdev)) + rdev->audio.num_pins = 4; else rdev->audio.num_pins = 6; diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index aa695c4..0c6d5ce 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; } @@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c 
index 11aab2a..f59a9e9 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x999C)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; + rdev->config.cayman.max_hw_contexts = 8; + rdev->config.cayman.sx_max_export_size = 256; + rdev->config.cayman.sx_max_export_pos_size = 64; + rdev->config.cayman.sx_max_export_smx_size = 192; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || @@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x999D)) { rdev->config.cayman.max_simds_per_se = 4; rdev->config.cayman.max_backends_per_se = 2; + rdev->config.cayman.max_hw_contexts = 8; + rdev->config.cayman.sx_max_export_size = 256; + rdev->config.cayman.sx_max_export_pos_size = 64; + rdev->config.cayman.sx_max_export_smx_size = 192; } else if ((rdev->pdev->device == 0x9919) || (rdev->pdev->device == 0x9990) || (rdev->pdev->device == 0x9991) || @@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; + rdev->config.cayman.max_hw_contexts = 4; + rdev->config.cayman.sx_max_export_size = 128; + rdev->config.cayman.sx_max_export_pos_size = 32; + rdev->config.cayman.sx_max_export_smx_size = 96; } else { rdev->config.cayman.max_simds_per_se = 2; rdev->config.cayman.max_backends_per_se = 1; + rdev->config.cayman.max_hw_contexts = 4; + rdev->config.cayman.sx_max_export_size = 128; + rdev->config.cayman.sx_max_export_pos_size = 32; + rdev->config.cayman.sx_max_export_smx_size = 96; } rdev->config.cayman.max_texture_channel_caches = 2; rdev->config.cayman.max_gprs = 256; @@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.max_gs_threads = 32; rdev->config.cayman.max_stack_entries = 512; rdev->config.cayman.sx_num_of_sets = 8; - rdev->config.cayman.sx_max_export_size = 256; - rdev->config.cayman.sx_max_export_pos_size = 64; - rdev->config.cayman.sx_max_export_smx_size = 192; - rdev->config.cayman.max_hw_contexts = 8; rdev->config.cayman.sq_num_cf_insts = 2; rdev->config.cayman.sc_prim_fifo_size = 0x40; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e354ce9..c0425bb 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = NULL, + .blit = &cik_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = NULL, + .blit = &cik_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 9f5ff28..1958b36a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -508,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = { #endif }; - -static void -radeon_pci_shutdown(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - radeon_driver_unload_kms(dev); -} - 
static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | @@ -586,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = { .probe = radeon_pci_probe, .remove = radeon_pci_remove, .driver.pm = &radeon_pm_ops, - .shutdown = radeon_pci_shutdown, }; static int __init radeon_init(void) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index dc75bb6..984097b 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -552,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); int hyst = to_sensor_dev_attr(attr)->index; int temp; @@ -580,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = container_of(kobj, struct device, kobj); - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); /* Skip limit attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1c56062..e7dab06 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev) base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + /* Some boards seem to be configured for 128MB of sideport memory, + * but really only have 64MB. Just skip the sideport and use + * UMA memory. + */ + if (rdev->mc.igp_sideport_enabled && + (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { + base += 128 * 1024 * 1024; + rdev->mc.real_vram_size -= 128 * 1024 * 1024; + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; + } /* Use K8 direct mapping for fast fb access. */ rdev->fastfb_working = false; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 913b025..374499d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); + /* disable ss, causes hangs on some cayman boards */ + if (rdev->family == CHIP_CAYMAN) { + pi->sclk_ss = false; + pi->mclk_ss = false; + } + if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 15b86a9..4061521 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * Don't move nonexistent data. Clear destination instead. 
*/ if (old_iomap == NULL && - (ttm == NULL || ttm->state == tt_unpopulated)) { + (ttm == NULL || (ttm->state == tt_unpopulated && + !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b249ab9..6440eea 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; - page_last = vma_pages(vma) + - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; + vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); + page_last = vma_pages(vma) + vma->vm_pgoff - + drm_vma_node_start(&bo->vma_node); if (unlikely(page_offset >= bo->num_pages)) { retval = VM_FAULT_SIGBUS; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a51f48e..45d5b5a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, SVGA_FIFO_3D_HWVERSION)); break; } + case DRM_VMW_PARAM_MAX_SURF_MEMORY: + param->value = dev_priv->memory_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index ecb5ca6..e776963 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -341,6 +341,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, case USB_DEVICE_ID_GENIUS_GX_IMPERATOR: rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, "Genius Gx Imperator Keyboard"); + break; case USB_DEVICE_ID_GENIUS_MANTICORE: rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, "Genius Manticore Keyboard"); diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index d87f7cb..8fab828 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -569,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev, goto err_free_names; } sd->hid_sensor_hub_client_devs[ + sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO; + sd->hid_sensor_hub_client_devs[ sd->hid_sensor_client_cnt].name = name; sd->hid_sensor_hub_client_devs[ sd->hid_sensor_client_cnt].platform_data = diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c index 2dc37c7..7d68a08 100644 --- a/drivers/hwmon/hih6130.c +++ b/drivers/hwmon/hih6130.c @@ -43,6 +43,7 @@ * @last_update: time of last update (jiffies) * @temperature: cached temperature measurement value * @humidity: cached humidity measurement value + * @write_length: length for I2C measurement request */ struct hih6130 { struct device *hwmon_dev; @@ -51,6 +52,7 @@ struct hih6130 { unsigned long last_update; int temperature; int humidity; + size_t write_length; }; /** @@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client) */ if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { - /* write to slave address, no data, to request a measurement */ - ret = i2c_master_send(client, tmp, 0); + /* + * Write to slave address to request a measurement. + * According with the datasheet it should be with no data, but + * for systems with I2C bus drivers that do not allow zero + * length packets we write one dummy byte to allow sensor + * measurements on them. 
+ */ + tmp[0] = 0; + ret = i2c_master_send(client, tmp, hih6130->write_length); if (ret < 0) goto out; @@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client, goto fail_remove_sysfs; } + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK)) + hih6130->write_length = 1; + return 0; fail_remove_sysfs: diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 6cf6bff..a2f3b4a 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0) return 255; + if (rpm > 1350000) + return 1; return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 4c4c142..8b8f3aa 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client, "lm90", client); if (err < 0) { dev_err(dev, "cannot request IRQ %d\n", client->irq); - goto exit_remove_files; + goto exit_unregister; } } return 0; +exit_unregister: + hwmon_device_unregister(data->hwmon_dev); exit_remove_files: lm90_remove_files(client, data); exit_restore: diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 1404e63..72a8897 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c @@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0) return 255; + if (rpm > 1350000) + return 1; return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 0e70178..aee14e2 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; */ static inline u8 FAN_TO_REG(long rpm, int div) { - if (rpm == 0) + if (rpm <= 0 || rpm > 1310720) return 0; return clamp_val(1310720 / (rpm * div), 1, 255); } diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index edb06cd..6ed76ce 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr, if (err) return err; val = clamp_val(val, 0, 255); + val = DIV_ROUND_CLOSEST(val, 0x11); mutex_lock(&data->update_lock); - data->pwm[nr] = val; + data->pwm[nr] = val * 0x11; + val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0; w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); mutex_unlock(&data->update_lock); return count; @@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); data->pwm_enable[nr] = val; - reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]); + reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]); reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); mutex_unlock(&data->update_lock); @@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev) ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) ? 
0 : 1; data->pwm_enable[i] = - ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1; - data->pwm[i] = w83l786ng_read_value(client, - W83L786NG_REG_PWM[i]); + ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1; + data->pwm[i] = + (w83l786ng_read_value(client, W83L786NG_REG_PWM[i]) + & 0x0f) * 0x11; } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1d7efa3..d0cfbb4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - clk_prepare_enable(i2c_imx->clk); + result = clk_prepare_enable(i2c_imx->clk); + if (result) + return result; imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); /* Enable I2C controller */ imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 797e311..2d0847b 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, priv->adap.algo = &priv->algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &parent->dev; + priv->adap.retries = parent->retries; + priv->adap.timeout = parent->timeout; /* Sanity check on class */ if (i2c_mux_parent_classes(parent) & class) diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index acb7f90..749a6ca 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c @@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = 1, .scan_index = 1, - .scan_type = IIO_ST('u', 12, 16, 0), + .scan_type = { + .sign = 'u', + .realbits = 12, + .storagebits = 16, + .shift = 0, + .endianness = IIO_BE, + }, }, .channel[1] = { .type = IIO_VOLTAGE, @@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = 0, .scan_index = 0, - .scan_type = IIO_ST('u', 12, 16, 0), + .scan_type = { + .sign = 'u', + .realbits = 12, + .storagebits = 16, + .shift = 0, + .endianness = IIO_BE, + }, }, .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), .int_vref_mv = 2500, diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 3fb7757..368660d 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = ADIS16448_BARO_OUT, .scan_index = ADIS16400_SCAN_BARO, - .scan_type = IIO_ST('s', 16, 16, 0), + .scan_type = { + .sign = 's', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_BE, + }, }, ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), IIO_CHAN_SOFT_TIMESTAMP(11) diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 21df571..0922e39 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c @@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651, return -EINVAL; } - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT; } static int cm36651_write_int_time(struct cm36651_data *cm36651, diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6be57c3..9804fca 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn 
*isert_conn) isert_conn->conn_rx_descs = NULL; } +static void isert_cq_tx_work(struct work_struct *); static void isert_cq_tx_callback(struct ib_cq *, void *); +static void isert_cq_rx_work(struct work_struct *); static void isert_cq_rx_callback(struct ib_cq *, void *); static int @@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device) cq_desc[i].device = device; cq_desc[i].cq_index = i; + INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work); device->dev_rx_cq[i] = ib_create_cq(device->ib_device, isert_cq_rx_callback, isert_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_RX_CQ_LEN, i); - if (IS_ERR(device->dev_rx_cq[i])) + if (IS_ERR(device->dev_rx_cq[i])) { + ret = PTR_ERR(device->dev_rx_cq[i]); + device->dev_rx_cq[i] = NULL; goto out_cq; + } + INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); device->dev_tx_cq[i] = ib_create_cq(device->ib_device, isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_TX_CQ_LEN, i); - if (IS_ERR(device->dev_tx_cq[i])) + if (IS_ERR(device->dev_tx_cq[i])) { + ret = PTR_ERR(device->dev_tx_cq[i]); + device->dev_tx_cq[i] = NULL; goto out_cq; + } - if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) + ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); + if (ret) goto out_cq; - if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) + ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); + if (ret) goto out_cq; } @@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context) { struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; - INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work); queue_work(isert_comp_wq, &cq_desc->cq_tx_work); } @@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context) { struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; - INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work); queue_work(isert_rx_wq, &cq_desc->cq_rx_work); } diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 0735de3..1cb1da2 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -158,7 +158,7 @@ /* ORIENT ADXL346 only */ #define ADXL346_2D_VALID (1 << 6) -#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) +#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4) #define ADXL346_3D_VALID (1 << 3) #define ADXL346_3D_ORIENT(x) ((x) & 0x7) #define ADXL346_2D_PORTRAIT_POS 0 /* +X */ diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 98707fb..8f4c4ab 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type); static DEVICE_ATTR_RO(proto); static DEVICE_ATTR_RO(id); static DEVICE_ATTR_RO(extra); -static DEVICE_ATTR_RO(modalias); -static DEVICE_ATTR_WO(drvctl); -static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); -static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); static struct attribute *serio_device_id_attrs[] = { &dev_attr_type.attr, &dev_attr_proto.attr, &dev_attr_id.attr, &dev_attr_extra.attr, + NULL +}; + +static struct attribute_group serio_id_attr_group = { + .name = "id", + .attrs = serio_device_id_attrs, +}; + +static DEVICE_ATTR_RO(modalias); +static DEVICE_ATTR_WO(drvctl); +static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); +static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); + +static struct attribute *serio_device_attrs[] = { &dev_attr_modalias.attr, &dev_attr_description.attr, 
&dev_attr_drvctl.attr, @@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = { NULL }; -static struct attribute_group serio_id_attr_group = { - .name = "id", - .attrs = serio_device_id_attrs, +static struct attribute_group serio_device_attr_group = { + .attrs = serio_device_attrs, }; static const struct attribute_group *serio_device_attr_groups[] = { &serio_id_attr_group, + &serio_device_attr_group, NULL }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1abfb56..e46a887 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -392,7 +392,7 @@ struct arm_smmu_domain { struct arm_smmu_cfg root_cfg; phys_addr_t output_mask; - spinlock_t lock; + struct mutex lock; }; static DEFINE_SPINLOCK(arm_smmu_devices_lock); @@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) goto out_free_domain; smmu_domain->root_cfg.pgd = pgd; - spin_lock_init(&smmu_domain->lock); + mutex_init(&smmu_domain->lock); domain->priv = smmu_domain; return 0; @@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't currently support domains * that cross between different SMMU chains. */ - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); if (!smmu_domain->leaf_smmu) { /* Now that we have a master, we can finalise the domain */ ret = arm_smmu_init_domain_context(domain, dev); @@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) dev_name(device_smmu->dev)); goto err_unlock; } - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Looks ok, so add the device to the domain */ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); @@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return arm_smmu_domain_add_master(smmu_domain, master); err_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); return ret; } @@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, if (paddr & ~output_mask) return -ERANGE; - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); pgd += pgd_index(iova); end = iova + size; do { @@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, } while (pgd++, iova != end); out_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Ensure new page tables are visible to the hardware walker */ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) @@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int flags) { struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu = smmu_domain->leaf_smmu; - if (!smmu_domain || !smmu) + if (!smmu_domain) return -ENODEV; /* Check for silent address truncation up the SMMU chain. 
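The arm-smmu hunks above convert the per-domain spinlock to a mutex. The usual reason for such a conversion, and presumably the one here (an assumption, not stated in the hunks), is that the critical section may sleep, for example in GFP_KERNEL allocations while building page tables. A minimal kernel-style sketch of the constraint (illustrative only, not the driver code, and not buildable on its own):

    static DEFINE_MUTEX(domain_lock);

    static int map_one_page_table(void)
    {
            void *pt;

            mutex_lock(&domain_lock);
            /* GFP_KERNEL may sleep; doing this under a spinlock would be
             * "scheduling while atomic", under a mutex it is legal. */
            pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
            if (!pt) {
                    mutex_unlock(&domain_lock);
                    return -ENOMEM;
            }
            /* ... install page table entries, publish the table ... */
            kfree(pt);
            mutex_unlock(&domain_lock);
            return 0;
    }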
*/ @@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp, pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; - spin_lock(&smmu_domain->lock); - pgd = root_cfg->pgd; - if (!pgd) - goto err_unlock; + pgdp = root_cfg->pgd; + if (!pgdp) + return 0; - pgd += pgd_index(iova); - if (pgd_none_or_clear_bad(pgd)) - goto err_unlock; + pgd = *(pgdp + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; - pud = pud_offset(pgd, iova); - if (pud_none_or_clear_bad(pud)) - goto err_unlock; + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; - pmd = pmd_offset(pud, iova); - if (pmd_none_or_clear_bad(pmd)) - goto err_unlock; + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; - pte = pmd_page_vaddr(*pmd) + pte_index(iova); + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); if (pte_none(pte)) - goto err_unlock; - - spin_unlock(&smmu_domain->lock); - return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); + return 0; -err_unlock: - spin_unlock(&smmu_domain->lock); - dev_warn(smmu->dev, - "invalid (corrupt?) page tables detected for iova 0x%llx\n", - (unsigned long long)iova); - return -EINVAL; + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); } static int arm_smmu_domain_has_cap(struct iommu_domain *domain, @@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) dev_err(dev, "found only %d context interrupt(s) but %d required\n", smmu->num_context_irqs, smmu->num_context_banks); + err = -ENODEV; goto out_put_parent; } diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 82cec63..3ee78f0 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, int irq, int do_mask) { - int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ - int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ + /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ + int bitfield_width = 4; + int shift = 32 - (irq + 1) * bitfield_width; intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, shift, bitfield_width, @@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) { + /* The SENSE register is assumed to be 32-bit. 
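The irq-renesas-intc-irqpin hunks above and below replace shift = (7 - irq) * bitfield_width with shift = 32 - (irq + 1) * bitfield_width. For 4-bit fields the two expressions are identical; for any other field width (the SENSE register's width is configurable) only the new form keeps every field inside a 32-bit register. A standalone comparison:

    #include <stdio.h>

    int main(void)
    {
            int widths[] = { 4, 2 };

            for (int w = 0; w < 2; w++) {
                    int width = widths[w];

                    for (int irq = 0; irq < 32 / width; irq++) {
                            int old_shift = (7 - irq) * width;       /* wrong unless width == 4 */
                            int new_shift = 32 - (irq + 1) * width;  /* always in 0..31 */

                            printf("width=%d irq=%2d old=%3d new=%3d\n",
                                   width, irq, old_shift, new_shift);
                    }
            }
            return 0;
    }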
*/ int bitfield_width = p->config.sense_bitfield_width; - int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ + int shift = 32 - (irq + 1) * bitfield_width; dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb2..54bdd923 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void) { __u64 mem; + dm_bufio_allocated_kmem_cache = 0; + dm_bufio_allocated_get_free_pages = 0; + dm_bufio_allocated_vmalloc = 0; + dm_bufio_current_allocated = 0; + memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 416b7b7..64780ad 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, int r = 0; bool updated = updated_this_tick(mq, e); - requeue_and_update_tick(mq, e); - if ((!discarded_oblock && updated) || - !should_promote(mq, e, discarded_oblock, data_dir)) + !should_promote(mq, e, discarded_oblock, data_dir)) { + requeue_and_update_tick(mq, e); result->op = POLICY_MISS; - else if (!can_migrate) + + } else if (!can_migrate) r = -EWOULDBLOCK; - else + + else { + requeue_and_update_tick(mq, e); r = pre_cache_to_cache(mq, e, result); + } return r; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9efcf10..1b1469e 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) { int r; - r = dm_cache_resize(cache->cmd, cache->cache_size); + r = dm_cache_resize(cache->cmd, new_size); if (r) { DMERR("could not resize cache metadata"); return r; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3..2f91d6d 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -20,6 +20,7 @@ struct delay_c { struct timer_list delay_timer; struct mutex timer_lock; + struct workqueue_struct *kdelayd_wq; struct work_struct flush_expired_bios; struct list_head delayed_bios; atomic_t may_delay; @@ -45,14 +46,13 @@ struct dm_delay_info { static DEFINE_MUTEX(delayed_bios_lock); -static struct workqueue_struct *kdelayd_wq; static struct kmem_cache *delayed_cache; static void handle_delayed_timer(unsigned long data) { struct delay_c *dc = (struct delay_c *)data; - queue_work(kdelayd_wq, &dc->flush_expired_bios); + queue_work(dc->kdelayd_wq, &dc->flush_expired_bios); } static void queue_timeout(struct delay_c *dc, unsigned long expires) @@ -191,6 +191,12 @@ out: goto bad_dev_write; } + dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); + if (!dc->kdelayd_wq) { + DMERR("Couldn't start kdelayd"); + goto bad_queue; + } + setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); @@ -203,6 +209,8 @@ out: ti->private = dc; return 0; +bad_queue: + mempool_destroy(dc->delayed_pool); bad_dev_write: if (dc->dev_write) dm_put_device(ti, dc->dev_write); @@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; - flush_workqueue(kdelayd_wq); + destroy_workqueue(dc->kdelayd_wq); dm_put_device(ti, dc->dev_read); @@ -350,12 +358,6 @@ static int __init dm_delay_init(void) { int r = -ENOMEM; - kdelayd_wq = alloc_workqueue("kdelayd", 
WQ_MEM_RECLAIM, 0); - if (!kdelayd_wq) { - DMERR("Couldn't start kdelayd"); - goto bad_queue; - } - delayed_cache = KMEM_CACHE(dm_delay_info, 0); if (!delayed_cache) { DMERR("Couldn't create delayed bio cache."); @@ -373,8 +375,6 @@ static int __init dm_delay_init(void) bad_register: kmem_cache_destroy(delayed_cache); bad_memcache: - destroy_workqueue(kdelayd_wq); -bad_queue: return r; } @@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void) { dm_unregister_target(&delay_target); kmem_cache_destroy(delayed_cache); - destroy_workqueue(kdelayd_wq); } /* Module hooks */ diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index aec57d7..944690b 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -66,6 +66,18 @@ struct dm_snapshot { atomic_t pending_exceptions_count; + /* Protected by "lock" */ + sector_t exception_start_sequence; + + /* Protected by kcopyd single-threaded callback */ + sector_t exception_complete_sequence; + + /* + * A list of pending exceptions that completed out of order. + * Protected by kcopyd single-threaded callback. + */ + struct list_head out_of_order_list; + mempool_t *pending_pool; struct dm_exception_table pending; @@ -173,6 +185,14 @@ struct dm_snap_pending_exception { */ int started; + /* There was copying error. */ + int copy_error; + + /* A sequence number, it is used for in-order completion. */ + sector_t exception_sequence; + + struct list_head out_of_order_entry; + /* * For writing a complete chunk, bypassing the copy. */ @@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->valid = 1; s->active = 0; atomic_set(&s->pending_exceptions_count, 0); + s->exception_start_sequence = 0; + s->exception_complete_sequence = 0; + INIT_LIST_HEAD(&s->out_of_order_list); init_rwsem(&s->lock); INIT_LIST_HEAD(&s->list); spin_lock_init(&s->pe_lock); @@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success) pending_complete(pe, success); } +static void complete_exception(struct dm_snap_pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + + if (unlikely(pe->copy_error)) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store->type->commit_exception(s->store, &pe->e, + commit_callback, pe); +} + /* * Called when the copy I/O has finished. kcopyd actually runs * this code so don't block. 
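The dm-snapshot changes above add a start/complete sequence counter and an out_of_order_list; the copy_callback() hunk that follows uses them so pending exceptions are always completed in the order they were issued, even when kcopyd finishes the copies out of order. A standalone userspace sketch of the same in-order completion pattern (names and the list handling are illustrative, error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct pending {                          /* one completion waiting its turn */
            unsigned long seq;
            struct pending *next;
    };

    static struct pending *out_of_order;      /* kept sorted by seq, ascending */
    static unsigned long complete_seq;        /* next sequence number we may act on */

    static void do_complete(unsigned long seq)
    {
            printf("completed %lu\n", seq);
    }

    /* Called whenever a copy finishes, possibly out of order. */
    static void copy_done(unsigned long seq)
    {
            if (seq == complete_seq) {
                    do_complete(seq);
                    complete_seq++;
                    /* drain queued completions that are now in order */
                    while (out_of_order && out_of_order->seq == complete_seq) {
                            struct pending *p = out_of_order;

                            out_of_order = p->next;
                            do_complete(p->seq);
                            complete_seq++;
                            free(p);
                    }
            } else {
                    /* park it, keeping the list sorted by sequence number */
                    struct pending **pp = &out_of_order;
                    struct pending *p = malloc(sizeof(*p));

                    p->seq = seq;
                    while (*pp && (*pp)->seq < seq)
                            pp = &(*pp)->next;
                    p->next = *pp;
                    *pp = p;
            }
    }

    int main(void)
    {
            copy_done(2);   /* parked */
            copy_done(0);   /* completes 0 */
            copy_done(1);   /* completes 1, then drains 2 */
            return 0;
    }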
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) struct dm_snap_pending_exception *pe = context; struct dm_snapshot *s = pe->snap; - if (read_err || write_err) - pending_complete(pe, 0); + pe->copy_error = read_err || write_err; - else - /* Update the metadata if we are persistent */ - s->store->type->commit_exception(s->store, &pe->e, - commit_callback, pe); + if (pe->exception_sequence == s->exception_complete_sequence) { + s->exception_complete_sequence++; + complete_exception(pe); + + while (!list_empty(&s->out_of_order_list)) { + pe = list_entry(s->out_of_order_list.next, + struct dm_snap_pending_exception, out_of_order_entry); + if (pe->exception_sequence != s->exception_complete_sequence) + break; + s->exception_complete_sequence++; + list_del(&pe->out_of_order_entry); + complete_exception(pe); + } + } else { + struct list_head *lh; + struct dm_snap_pending_exception *pe2; + + list_for_each_prev(lh, &s->out_of_order_list) { + pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry); + if (pe2->exception_sequence < pe->exception_sequence) + break; + } + list_add(&pe->out_of_order_entry, lh); + } } /* @@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s, return NULL; } + pe->exception_sequence = s->exception_start_sequence++; + dm_insert_exception(&s->pending, &pe->e); return pe; @@ -2192,7 +2249,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 11, 1}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 3d404c1..28a9012 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, int __init dm_statistics_init(void) { + shared_memory_amount = 0; dm_stat_need_rcu_barrier = 0; return 0; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 465f08c..3ba6a38 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode, num_targets = dm_round_up(num_targets, KEYS_PER_NODE); + if (!num_targets) { + kfree(t); + return -ENOMEM; + } + if (alloc_targets(t, num_targets)) { kfree(t); return -ENOMEM; diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 60bce43..8a30ad5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) up_write(&pmd->root_lock); } +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) +{ + down_write(&pmd->root_lock); + pmd->read_only = false; + dm_bm_set_read_write(pmd->bm); + up_write(&pmd->root_lock); +} + int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_block_t threshold, dm_sm_threshold_fn fn, diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 845ebbe..7bcc0e1 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz * that nothing is changing. 
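The dm_table_create() hunk above rejects a target count that rounds up to zero. Presumably (an inference, not stated in the hunk) the point is that dm_round_up() can wrap to 0 when a huge, ioctl-supplied count overflows, which would let alloc_targets() size a zero-length table. A standalone illustration of the wrap, using a typical power-of-two round-up helper as a stand-in:

    #include <stdio.h>
    #include <stdint.h>

    /* same shape as a power-of-two roundup(); the kernel helper may differ */
    static uint32_t round_up32(uint32_t n, uint32_t factor)
    {
            return (n + factor - 1) & ~(factor - 1);
    }

    int main(void)
    {
            printf("%u\n", round_up32(4294967290u, 8));  /* wraps, prints 0 */
            printf("%u\n", round_up32(100u, 8));         /* prints 104 */
            return 0;
    }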
*/ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd); int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_block_t threshold, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2c0cf51..ee29037 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); if (r) { - DMERR_LIMIT("dm_thin_insert_block() failed"); + DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d", + dm_device_name(pool->pool_md), r); + set_pool_mode(pool, PM_READ_ONLY); cell_error(pool, m->cell); goto out; } @@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, } } -static int commit(struct pool *pool) -{ - int r; - - r = dm_pool_commit_metadata(pool->pmd); - if (r) - DMERR_LIMIT("%s: commit failed: error = %d", - dm_device_name(pool->pool_md), r); - - return r; -} - /* * A non-zero return indicates read_only or fail_io mode. * Many callers don't care about the return value. */ -static int commit_or_fallback(struct pool *pool) +static int commit(struct pool *pool) { int r; if (get_pool_mode(pool) != PM_WRITE) return -EINVAL; - r = commit(pool); - if (r) + r = dm_pool_commit_metadata(pool->pmd); + if (r) { + DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d", + dm_device_name(pool->pool_md), r); set_pool_mode(pool, PM_READ_ONLY); + } return r; } @@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) * Try to commit to see if that will free up some * more space. */ - (void) commit_or_fallback(pool); + r = commit(pool); + if (r) + return r; r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); if (r) @@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) * table reload). 
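The dm-thin hunk above folds commit_or_fallback() into commit(): the mode gate and the fall-back to read-only mode now live in one place, and callers that care can simply propagate the return value. A standalone sketch of that gate/attempt/degrade pattern (names and error values are illustrative):

    #include <stdio.h>

    enum pool_mode { PM_WRITE, PM_READ_ONLY, PM_FAIL };

    struct pool { enum pool_mode mode; };

    static int commit_metadata(struct pool *p)
    {
            (void)p;
            return -5;              /* pretend the metadata write failed (-EIO) */
    }

    /* Gate on mode, try the commit, degrade to read-only on failure. */
    static int commit(struct pool *p)
    {
            int r;

            if (p->mode != PM_WRITE)
                    return -22;     /* -EINVAL: pool is already degraded */

            r = commit_metadata(p);
            if (r)
                    p->mode = PM_READ_ONLY;
            return r;
    }

    int main(void)
    {
            struct pool p = { .mode = PM_WRITE };

            printf("first commit: %d, mode now %d\n", commit(&p), p.mode);
            printf("second commit: %d (refused, pool is read-only)\n", commit(&p));
            return 0;
    }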
*/ if (!free_blocks) { - DMWARN("%s: no free space available.", + DMWARN("%s: no free data space available.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->no_free_space = 1; @@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) } r = dm_pool_alloc_data_block(pool->pmd, result); - if (r) + if (r) { + if (r == -ENOSPC && + !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && + !free_blocks) { + DMWARN("%s: no free metadata space available.", + dm_device_name(pool->pool_md)); + set_pool_mode(pool, PM_READ_ONLY); + } return r; + } return 0; } @@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool) if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) return; - if (commit_or_fallback(pool)) { + if (commit(pool)) { while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; @@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) case PM_FAIL: DMERR("%s: switching pool to failure mode", dm_device_name(pool->pool_md)); + dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; pool->process_prepared_mapping = process_prepared_mapping_fail; @@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) break; case PM_WRITE: + dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard; pool->process_prepared_mapping = process_prepared_mapping; @@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) struct pool_c *pt = ti->private; /* - * We want to make sure that degraded pools are never upgraded. + * We want to make sure that a pool in PM_FAIL mode is never upgraded. */ enum pool_mode old_mode = pool->pf.mode; enum pool_mode new_mode = pt->adjusted_pf.mode; - if (old_mode > new_mode) + /* + * If we were in PM_FAIL mode, rollback of metadata failed. We're + * not going to recover without a thin_repair. So we never let the + * pool move out of the old mode. On the other hand a PM_READ_ONLY + * may have been due to a lack of metadata or data space, and may + * now work (ie. if the underlying devices have been resized). 
+ */ + if (old_mode == PM_FAIL) new_mode = old_mode; pool->ti = ti; @@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti) return r; if (need_commit1 || need_commit2) - (void) commit_or_fallback(pool); + (void) commit(pool); return 0; } @@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti) cancel_delayed_work(&pool->waker); flush_workqueue(pool->wq); - (void) commit_or_fallback(pool); + (void) commit(pool); } static int check_arg_count(unsigned argc, unsigned args_required) @@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct if (r) return r; - (void) commit_or_fallback(pool); + (void) commit(pool); r = dm_pool_reserve_metadata_snap(pool->pmd); if (r) @@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) DMWARN("Unrecognised thin pool target message received: %s", argv[0]); if (!r) - (void) commit_or_fallback(pool); + (void) commit(pool); return r; } @@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type, /* Commit to ensure statistics aren't out-of-date */ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) - (void) commit_or_fallback(pool); + (void) commit(pool); r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); if (r) { diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c index af96e24..1d75b1d 100644 --- a/drivers/md/persistent-data/dm-array.c +++ b/drivers/md/persistent-data/dm-array.c @@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root, * The shadow op will often be a noop. Only insert if it really * copied data. */ - if (dm_block_location(*block) != b) + if (dm_block_location(*block) != b) { + /* + * dm_tm_shadow_block will have already decremented the old + * block, but it is still referenced by the btree. We + * increment to stop the insert decrementing it below zero + * when overwriting the old value. + */ + dm_tm_inc(info->btree_info.tm, b); r = insert_ablock(info, index, *block, root); + } return r; } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index a7e8bf2..064a3c2 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm) } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); +void dm_bm_set_read_write(struct dm_block_manager *bm) +{ + bm->read_only = false; +} +EXPORT_SYMBOL_GPL(dm_bm_set_read_write); + u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) { return crc32c(~(u32) 0, data, len) ^ init_xor; diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 9a82083..13cd58e 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b); int dm_bm_flush_and_unlock(struct dm_block_manager *bm, struct dm_block *superblock); - /* - * Request data be prefetched into the cache. - */ +/* + * Request data is prefetched into the cache. + */ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); /* @@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); * be returned if you do. 
*/ void dm_bm_set_read_only(struct dm_block_manager *bm); +void dm_bm_set_read_write(struct dm_block_manager *bm); u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 6058569..466a60b 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, } static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, - uint32_t (*mutator)(void *context, uint32_t old), + int (*mutator)(void *context, uint32_t old, uint32_t *new), void *context, enum allocation_event *ev) { int r; @@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, if (old > 2) { r = sm_ll_lookup_big_ref_count(ll, b, &old); - if (r < 0) + if (r < 0) { + dm_tm_unlock(ll->tm, nb); return r; + } } - ref_count = mutator(context, old); + r = mutator(context, old, &ref_count); + if (r) { + dm_tm_unlock(ll->tm, nb); + return r; + } if (ref_count <= 2) { sm_set_bitmap(bm_le, bit, ref_count); @@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, return ll->save_ie(ll, index, &ie_disk); } -static uint32_t set_ref_count(void *context, uint32_t old) +static int set_ref_count(void *context, uint32_t old, uint32_t *new) { - return *((uint32_t *) context); + *new = *((uint32_t *) context); + return 0; } int sm_ll_insert(struct ll_disk *ll, dm_block_t b, @@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); } -static uint32_t inc_ref_count(void *context, uint32_t old) +static int inc_ref_count(void *context, uint32_t old, uint32_t *new) { - return old + 1; + *new = old + 1; + return 0; } int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) @@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); } -static uint32_t dec_ref_count(void *context, uint32_t old) +static int dec_ref_count(void *context, uint32_t old, uint32_t *new) { - return old - 1; + if (!old) { + DMERR_LIMIT("unable to decrement a reference count below 0"); + return -EINVAL; + } + + *new = old - 1; + return 0; } int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 1c95968..58fc1ee 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); int r = sm_metadata_new_block_(sm, b); - if (r) + if (r) { DMERR("unable to allocate new metadata block"); + return r; + } r = sm_metadata_get_nr_free(sm, &count); - if (r) + if (r) { DMERR("couldn't get free block count"); + return r; + } check_threshold(&smm->threshold, count); diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index d0799e3..9c9063c 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h @@ -955,7 +955,7 @@ struct sms_rx_stats { u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ s32 SNR; /* dB */ u32 ber; /* Post Viterbi ber [1E-5] */ - u32 ber_error_count; /* Number of 
erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ u32 ber_bit_count; /* Total number of SYNC bits. */ u32 ts_per; /* Transport stream PER, 0xFFFFFFFF indicate N/A */ @@ -981,7 +981,7 @@ struct sms_rx_stats_ex { u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ s32 SNR; /* dB */ u32 ber; /* Post Viterbi ber [1E-5] */ - u32 ber_error_count; /* Number of erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ u32 ber_bit_count; /* Total number of SYNC bits. */ u32 ts_per; /* Transport stream PER, 0xFFFFFFFF indicate N/A */ diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h index 92c413b..ae36d0a 100644 --- a/drivers/media/common/siano/smsdvb.h +++ b/drivers/media/common/siano/smsdvb.h @@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S { u32 is_demod_locked; /* 0 - not locked, 1 - locked */ u32 ber_bit_count; /* Total number of SYNC bits. */ - u32 ber_error_count; /* Number of erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ s32 MRC_SNR; /* dB */ s32 mrc_in_band_pwr; /* In band power in dBM */ diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 58de441..6c7ff0c 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) dprintk_tscheck("TEI detected. " "PID=0x%x data1=0x%x\n", pid, buf[1]); - /* data in this packet cant be trusted - drop it unless + /* data in this packet can't be trusted - drop it unless * module option dvb_demux_feed_err_pkts is set */ if (!dvb_demux_feed_err_pkts) return; @@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, return -EINVAL; } - if (feed->is_filtering) + if (feed->is_filtering) { + /* release dvbdmx->mutex as far as it is + acquired by stop_filtering() itself */ + mutex_unlock(&dvbdmx->mutex); feed->stop_filtering(feed); + mutex_lock(&dvbdmx->mutex); + } spin_lock_irq(&dvbdmx->lock); f = dvbdmxfeed->filter; diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index 30ee590..65728c2 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val, static int af9033_wr_reg_val_tab(struct af9033_state *state, const struct reg_val *tab, int tab_len) { +#define MAX_TAB_LEN 212 int ret, i, j; - u8 buf[MAX_XFER_SIZE]; + u8 buf[1 + MAX_TAB_LEN]; + + dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); if (tab_len > sizeof(buf)) { - dev_warn(&state->i2c->dev, - "%s: i2c wr len=%d is too big!\n", - KBUILD_MODNAME, tab_len); + dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n", + KBUILD_MODNAME, tab_len); return -EINVAL; } - dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); - for (i = 0, j = 0; i < tab_len; i++) { buf[j] = tab[i].val; diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c index 125a440..5c6ab49 100644 --- a/drivers/media/dvb-frontends/cxd2820r_c.c +++ b/drivers/media/dvb-frontends/cxd2820r_c.c @@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe) num = if_freq / 1000; /* Hz => kHz */ num *= 0x4000; - if_ctl = cxd2820r_div_u64_round_closest(num, 41000); + if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000); buf[0] = (if_ctl >> 8) & 0x3f; buf[1] = 
(if_ctl >> 0) & 0xff; diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 9053614..6dbbee4 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe) dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */ - /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */ + /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */ *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); *tune_state = CT_DEMOD_STEP_5; break; @@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe) case CT_DEMOD_STEP_9: /* 39 */ if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ - /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */ + /* defines timeout for mpeg lock depending on interleaver length of longest layer */ for (i = 0; i < 3; i++) { if (c->layer[i].interleaving >= deeper_interleaver) { dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index d416c15..bf29a3f 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable) goto error; if (state->m_enable_parallel == true) { - /* paralel -> enable MD1 to MD7 */ + /* parallel -> enable MD1 to MD7 */ status = write16(state, SIO_PDR_MD1_CFG__A, sio_pdr_mdx_cfg); if (status < 0) @@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state) dprintk(1, "\n"); - /* Gracefull shutdown (byte boundaries) */ + /* Graceful shutdown (byte boundaries) */ status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); if (status < 0) goto error; @@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state, fec_oc_dto_burst_len = 204; } - /* Check serial or parrallel output */ + /* Check serial or parallel output */ fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); if (state->m_enable_parallel == false) { /* MPEG data output is serial -> set ipr_mode[0] */ @@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state) goto error; if (count == 1) { - /* Try sampling on a diffrent edge */ + /* Try sampling on a different edge */ u16 clk_neg = 0; status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); @@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state, if (status < 0) goto error; - /* Retreive results parameters from SC */ + /* Retrieve results parameters from SC */ switch (cmd) { /* All commands yielding 5 results */ /* All commands yielding 4 results */ @@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, break; } #if 0 - /* No hierachical channels support in BDA */ + /* No hierarchical channels support in BDA */ /* Priority (only for hierarchical channels) */ switch (channel->priority) { case DRX_PRIORITY_LOW: @@ -4081,7 +4081,7 @@ error: /*============================================================================*/ /** -* \brief Retreive lock status . +* \brief Retrieve lock status . * \param demod Pointer to demodulator instance. * \param lockStat Pointer to lock status structure. 
* \return DRXStatus_t. @@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state) goto error; /* Stamp driver version number in SCU data RAM in BCD code - Done to enable field application engineers to retreive drxdriver version + Done to enable field application engineers to retrieve drxdriver version via I2C from SCU RAM. Not using SCU command interface for SCU register access since no microcode may be present. @@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe) fe->ops.tuner_ops.get_if_frequency(fe, &IF); start(state, 0, IF); - /* After set_frontend, stats aren't avaliable */ + /* After set_frontend, stats aren't available */ p->strength.stat[0].scale = FE_SCALE_RELATIVE; p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 7efb796..50e8b63 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg, sizeof(priv->tuner_i2c_adapter.name)); priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; priv->tuner_i2c_adapter.algo_data = NULL; + priv->tuner_i2c_adapter.dev.parent = &i2c->dev; i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { dev_err(&i2c->dev, diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h index 4a5b7d2..b253d40 100644 --- a/drivers/media/i2c/adv7183_regs.h +++ b/drivers/media/i2c/adv7183_regs.h @@ -52,9 +52,9 @@ #define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ #define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ #define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ -#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */ -#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */ -#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */ +#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */ +#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */ +#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */ #define ADV7183_POLARITY 0x37 /* Polarity */ #define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ #define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index fbfdd2f..a324106b 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, break; case ADV7604_MODE_HDMI: /* set default prim_mode/vid_std for HDMI - accoring to [REF_03, c. 4.2] */ + according to [REF_03, c. 4.2] */ io_write(sd, 0x00, 0x02); /* video std */ io_write(sd, 0x01, 0x06); /* prim mode */ break; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 22f729d..b154f36 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, break; case ADV7842_MODE_HDMI: /* set default prim_mode/vid_std for HDMI - accoring to [REF_03, c. 4.2] */ + according to [REF_03, c. 
4.2] */ io_write(sd, 0x00, 0x02); /* video std */ io_write(sd, 0x01, 0x06); /* prim mode */ break; diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index 82bf567..99ee456 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) if (!rc) { /* - * If platform_data doesn't specify rc_dev, initilize it + * If platform_data doesn't specify rc_dev, initialize it * internally */ rc = rc_allocate_device(); diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c index f34429e..a60931e 100644 --- a/drivers/media/i2c/m5mols/m5mols_controls.c +++ b/drivers/media/i2c/m5mols/m5mols_controls.c @@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd) u16 zoom_step; int ret; - /* Determine the firmware dependant control range and step values */ + /* Determine the firmware dependent control range and step values */ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); if (ret < 0) return ret; diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index 4734836..1c2303d 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/log2.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pm.h> #include <linux/regulator/consumer.h> diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 6fec938..e7f555c 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd) mutex_unlock(&state->lock); v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", - __func__, ret ? "failed" : "succeded", ret); + __func__, ret ? "failed" : "succeeded", ret); return ret; } diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h index 9d2c086..9dfa516 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3.h +++ b/drivers/media/i2c/s5c73m3/s5c73m3.h @@ -393,7 +393,7 @@ struct s5c73m3 { /* External master clock frequency */ u32 mclk_frequency; - /* Video bus type - MIPI-CSI2/paralell */ + /* Video bus type - MIPI-CSI2/parallel */ enum v4l2_mbus_type bus_type; const struct s5c73m3_frame_size *sensor_pix_size[2]; diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 637d026..afdbcb0 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state, * the analog demod. * If the tuner is not found, it returns -ENODEV. * If auto-detection is disabled and the tuner doesn't match what it was - * requred, it returns -EINVAL and fills 'name'. + * required, it returns -EINVAL and fills 'name'. * If the chip is found, it returns the chip ID and fills 'name'. 
*/ static int saa711x_detect_chip(struct i2c_client *client, diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c index 0a5c5d4..d2daa6a 100644 --- a/drivers/media/i2c/soc_camera/ov5642.c +++ b/drivers/media/i2c/soc_camera/ov5642.c @@ -642,7 +642,7 @@ static const struct ov5642_datafmt static int reg_read(struct i2c_client *client, u16 reg, u8 *val) { int ret; - /* We have 16-bit i2c addresses - care for endianess */ + /* We have 16-bit i2c addresses - care for endianness */ unsigned char data[2] = { reg >> 8, reg & 0xff }; ret = i2c_master_send(client, data, 2); diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c index 42276d9..ed9ae88 100644 --- a/drivers/media/i2c/ths7303.c +++ b/drivers/media/i2c/ths7303.c @@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val) } /* following function is used to set ths7303 */ -int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode) +static int ths7303_setval(struct v4l2_subdev *sd, + enum ths7303_filter_mode mode) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ths7303_state *state = to_state(sd); diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c index 3f584a7..bee7946 100644 --- a/drivers/media/i2c/wm8775.c +++ b/drivers/media/i2c/wm8775.c @@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd, return -EINVAL; } state->input = input; - if (!v4l2_ctrl_g_ctrl(state->mute)) + if (v4l2_ctrl_g_ctrl(state->mute)) return 0; if (!v4l2_ctrl_g_ctrl(state->vol)) return 0; - if (!v4l2_ctrl_g_ctrl(state->bal)) - return 0; wm8775_set_audio(sd, 1); return 0; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index a3b1ee9..92a06fd 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) } btv->std = V4L2_STD_PAL; init_irqreg(btv); - v4l2_ctrl_handler_setup(hdl); + if (!bttv_tvcards[btv->c.type].no_video) + v4l2_ctrl_handler_setup(hdl); if (hdl->error) { result = hdl->error; goto fail2; diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index 2767c64..57f4688 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -262,7 +262,7 @@ struct cx18_options { }; /* per-mdl bit flags */ -#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */ +#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */ /* per-stream, s_flags */ #define CX18_F_S_CLAIMED 3 /* this stream is claimed */ diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index e3fc2c7..95666ee 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value) cx_write(MC417_RWD, regval); /* Transition RD to effect read transaction across bus. - * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? + * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its * input only...) 
*/ diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 8164d74..655d6854 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c @@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto) /* set automatic LED control by FPGA */ pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); - /* set data endianess */ + /* set data endianness */ #ifdef __LITTLE_ENDIAN pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); #else diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 57ef545..1bf0697 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev, if (fw_debug) { dev->kthread = kthread_run(saa7164_thread_function, dev, "saa7164 debug"); - if (!dev->kthread) + if (IS_ERR(dev->kthread)) { + dev->kthread = NULL; printk(KERN_ERR "%s() Failed to create " "debug kernel thread\n", __func__); + } } } /* != BOARD_UNKNOWN */ diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index bd72fb9..61f3dbc 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb) if (q_data->fourcc == V4L2_PIX_FMT_H264 && vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { /* - * For backwards compatiblity, queuing an empty buffer marks + * For backwards compatibility, queuing an empty buffer marks * the stream end */ if (vb2_get_plane_payload(vb, 0) == 0) diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index 3d66d88..f791569 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev) dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); - /* Enable clocks and perform basic initalization */ + /* Enable clocks and perform basic initialization */ clk_enable(fimc->clock[CLK_GATE]); fimc_hw_reset(fimc); diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 7a4ee4c..c1bce17 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd, goto dev_unlock; drvdata = dev_get_drvdata(dev); - /* Some subdev didn't probe succesfully id drvdata is NULL */ + /* Some subdev didn't probe successfully id drvdata is NULL */ if (drvdata) { switch (plat_entity) { case IDX_FIMC: diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 3458fa0..054507f 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam) struct mmp_camera *cam = mcam_to_cam(mcam); struct mmp_camera_platform_data *pdata; - if (mcam->bus_type == V4L2_MBUS_CSI2) { - cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); - if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) - return PTR_ERR(cam->mipi_clk); - } - /* * Turn on power and clocks to the controller. 
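The saa7164 hunk above fixes the error check for kthread_run(), which reports failure through the ERR_PTR convention rather than by returning NULL, so a !ptr test can never catch it. A standalone re-creation of that convention shows why only IS_ERR() works (the helpers below mimic the kernel ones for illustration):

    #include <stdio.h>
    #include <errno.h>

    static inline void *ERR_PTR(long error)     { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-4095;
    }

    static void *fake_kthread_run(int fail)
    {
            return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* dummy task pointer */
    }

    int main(void)
    {
            void *task = fake_kthread_run(1);

            if (!task)
                    printf("NULL check would catch it\n");
            else
                    printf("NULL check sees a \"valid\" pointer, error missed\n");
            if (IS_ERR(task))
                    printf("IS_ERR catches error %ld\n", PTR_ERR(task));
            return 0;
    }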
*/ @@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam) gpio_set_value(pdata->sensor_power_gpio, 0); gpio_set_value(pdata->sensor_reset_gpio, 0); - if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) { - if (cam->mipi_clk) - devm_clk_put(mcam->dev, cam->mipi_clk); - cam->mipi_clk = NULL; - } - mcam_clk_disable(mcam); } @@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam) return; /* get the escape clk, this is hard coded */ + clk_prepare_enable(cam->mipi_clk); tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; - + clk_disable_unprepare(cam->mipi_clk); /* * dphy[2] - CSI2_DPHY6: * bit 0 ~ bit 7: CK Term Enable @@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data) return IRQ_RETVAL(handled); } -static void mcam_deinit_clk(struct mcam_camera *mcam) -{ - unsigned int i; - - for (i = 0; i < NR_MCAM_CLK; i++) { - if (!IS_ERR(mcam->clk[i])) { - if (mcam->clk[i]) - devm_clk_put(mcam->dev, mcam->clk[i]); - } - mcam->clk[i] = NULL; - } -} - static void mcam_init_clk(struct mcam_camera *mcam) { unsigned int i; @@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev) if (cam == NULL) return -ENOMEM; cam->pdev = pdev; - cam->mipi_clk = NULL; INIT_LIST_HEAD(&cam->devlist); mcam = &cam->mcam; @@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev) mcam->mclk_div = pdata->mclk_div; mcam->bus_type = pdata->bus_type; mcam->dphy = pdata->dphy; + if (mcam->bus_type == V4L2_MBUS_CSI2) { + cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); + if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) + return PTR_ERR(cam->mipi_clk); + } mcam->mipi_enabled = false; mcam->lane = pdata->lane; mcam->chip_id = MCAM_ARMADA610; @@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev) */ ret = mmpcam_power_up(mcam); if (ret) - goto out_deinit_clk; + return ret; ret = mccic_register(mcam); if (ret) goto out_power_down; @@ -469,8 +449,6 @@ out_unregister: mccic_shutdown(mcam); out_power_down: mmpcam_power_down(mcam); -out_deinit_clk: - mcam_deinit_clk(mcam); return ret; } @@ -478,18 +456,10 @@ out_deinit_clk: static int mmpcam_remove(struct mmp_camera *cam) { struct mcam_camera *mcam = &cam->mcam; - struct mmp_camera_platform_data *pdata; mmpcam_remove_device(cam); mccic_shutdown(mcam); mmpcam_power_down(mcam); - pdata = cam->pdev->dev.platform_data; - gpio_free(pdata->sensor_reset_gpio); - gpio_free(pdata->sensor_power_gpio); - mcam_deinit_clk(mcam); - iounmap(cam->power_regs); - iounmap(mcam->regs); - kfree(cam); return 0; } diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1c36080..561bce8 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp) * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in * resume(), and the the pipelines are restarted in complete(). * - * TODO: PM dependencies between the ISP and sensors are not modeled explicitly + * TODO: PM dependencies between the ISP and sensors are not modelled explicitly * yet. 
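The marvell-ccic hunks above drop the hand-rolled clock teardown and fetch the MIPI clock with devm_clk_get() once in probe: devm-managed resources are released automatically when the device is unbound, so explicit devm_clk_put()/deinit paths are unnecessary, and the escape-clock rate is now read with the clock temporarily enabled. A kernel-style sketch of that pattern (hypothetical foo_probe(), not buildable on its own):

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            /* Managed get: the reference is dropped automatically on unbind. */
            clk = devm_clk_get(&pdev->dev, "mipi");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* Enable only around the rate read, as mmpcam_calc_dphy() now does. */
            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;
            dev_info(&pdev->dev, "mipi clock rate: %lu Hz\n", clk_get_rate(clk));
            clk_disable_unprepare(clk);

            return 0;       /* no clk_put() and no custom deinit path needed */
    }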
*/ static int isp_pm_prepare(struct device *dev) diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index a908d00..f6304bb 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) if (subdev == NULL) return -EINVAL; - mutex_lock(&video->mutex); - fmt.pad = pad; fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); - if (ret == -ENOIOCTLCMD) - ret = -EINVAL; + mutex_lock(&video->mutex); + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); mutex_unlock(&video->mutex); if (ret) diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h index 9319e93..6ccc3f8 100644 --- a/drivers/media/platform/s5p-mfc/regs-mfc.h +++ b/drivers/media/platform/s5p-mfc/regs-mfc.h @@ -382,7 +382,7 @@ #define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 #define S5P_FIMV_R2H_CMD_ERR_RET 32 -/* Dummy definition for MFCv6 compatibilty */ +/* Dummy definition for MFCv6 compatibility */ #define S5P_FIMV_CODEC_H264_MVC_DEC -1 #define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 #define S5P_FIMV_MFC_RESET -1 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 5f2c4ad..e46067a 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); /* Copy timestamp / timecode from decoded src to dst and set - appropraite flags */ + appropriate flags */ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); list_for_each_entry(dst_buf, &ctx->dst_queue, list) { if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { @@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, case MFCINST_FINISHING: case MFCINST_FINISHED: case MFCINST_RUNNING: - /* It is higly probable that an error occured + /* It is highly probable that an error occurred * while decoding a frame */ clear_work_bit(ctx); ctx->state = MFCINST_ERROR; @@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); switch (reason) { case S5P_MFC_R2H_CMD_ERR_RET: - /* An error has occured */ + /* An error has occurred */ if (ctx->state == MFCINST_RUNNING && s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= dev->warn_start) @@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file) mutex_unlock(&dev->mfc_mutex); mfc_debug_leave(); return ret; - /* Deinit when failure occured */ + /* Deinit when failure occurred */ err_queue_init: if (dev->num_inst == 1) s5p_mfc_deinit_hw(dev); @@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file) /* Mark context as idle */ clear_work_bit_irqsave(ctx); /* If instance was initialised then - * return instance and free reosurces */ + * return instance and free resources */ if (ctx->inst_no != MFC_NO_INSTANCE_SET) { mfc_debug(2, "Has to free instance\n"); ctx->state = MFCINST_RETURN_INST; set_work_bit_irqsave(ctx); s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - /* Wait until instance is returned or timeout occured */ + /* Wait until instance is returned or timeout occurred */ if (s5p_mfc_wait_for_done_ctx (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { s5p_mfc_clock_off(); diff --git 
a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 7cab684..2475a3c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) } else { /* In this case bank2 can point to the same address as bank1. - * Firmware will always occupy the beggining of this area so it is + * Firmware will always occupy the beginning of this area so it is * impossible having a video frame buffer with zero address. */ dev->bank2 = dev->bank1; } diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h index 04e6490..fb2acc5 100644 --- a/drivers/media/platform/s5p-tv/mixer.h +++ b/drivers/media/platform/s5p-tv/mixer.h @@ -65,7 +65,7 @@ struct mxr_format { int num_subframes; /** specifies to which subframe belong given plane */ int plane2subframe[MXR_MAX_PLANES]; - /** internal code, driver dependant */ + /** internal code, driver dependent */ unsigned long cookie; }; diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c index 641b1f0..81b97db 100644 --- a/drivers/media/platform/s5p-tv/mixer_video.c +++ b/drivers/media/platform/s5p-tv/mixer_video.c @@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh, mutex_lock(&mdev->mutex); /* timings change cannot be done while there is an entity - * dependant on output configuration + * dependent on output configuration */ if (mdev->n_output > 0) { mutex_unlock(&mdev->mutex); @@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm) mutex_lock(&mdev->mutex); /* standard change cannot be done while there is an entity - * dependant on output configuration + * dependent on output configuration */ if (mdev->n_output > 0) { mutex_unlock(&mdev->mutex); diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index 6769193..74ce8b6 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c @@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd) if (ctrlclock & LCLK_EN) CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); - /* select bus endianess */ + /* select bus endianness */ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); fmt = xlate->host_fmt; diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c index 1d3f119..2d4e73b 100644 --- a/drivers/media/platform/vivi.c +++ b/drivers/media/platform/vivi.c @@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i) return 0; } -/* timeperframe is arbitrary and continous */ +/* timeperframe is arbitrary and continuous */ static int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival) { @@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv, fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; - /* fill in stepwise (step=1.0 is requred by V4L2 spec) */ + /* fill in stepwise (step=1.0 is required by V4L2 spec) */ fival->stepwise.min = tpf_min; fival->stepwise.max = tpf_max; fival->stepwise.step = (struct v4l2_fract) {1, 1}; diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 1c9e771..d16bf0f 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct 
vsp1_device *vsp1) * Increment the VSP1 reference count and initialize the device if the first * reference is taken. * - * Return a pointer to the VSP1 device or NULL if an error occured. + * Return a pointer to the VSP1 device or NULL if an error occurred. */ struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) { diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 714c53e..4b0ac07 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) /* ... and the buffers queue... */ video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); - if (IS_ERR(video->alloc_ctx)) + if (IS_ERR(video->alloc_ctx)) { + ret = PTR_ERR(video->alloc_ctx); goto error; + } video->queue.type = video->type; video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 3db8a8c..050b3bb 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } -#ifdef CONFIG_PM -static void shark_resume_leds(struct shark_device *shark) +static inline void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) set_bit(BLUE_PULSE_LED, &shark->brightness_new); @@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark) set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } -#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index d86d90d..8654e0d 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } -#ifdef CONFIG_PM -static void shark_resume_leds(struct shark_device *shark) +static inline void shark_resume_leds(struct shark_device *shark) { int i; @@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark) schedule_work(&shark->led_work); } -#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9c9084c..2fd9009 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -268,8 +268,8 @@ struct si476x_radio; * * @tune_freq: Tune chip to a specific frequency * @seek_start: Star station seeking - * @rsq_status: Get Recieved Signal Quality(RSQ) status - * @rds_blckcnt: Get recived RDS blocks count + * @rsq_status: Get Received Signal Quality(RSQ) status + * @rds_blckcnt: Get received RDS blocks count * @phase_diversity: Change phase diversity mode of the tuner * @phase_div_status: Get phase diversity mode status * @acf_status: Get the status of Automatically Controlled diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 036e2f5..3ed1f56 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv, So we keep it as-is. 
*/ return -EINVAL; } - clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); + freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); tea5764_power_up(radio); tea5764_tune(radio, (freq * 125) / 2); return 0; diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 69e3245..a9319a2 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen if (f->tuner != 0) return -EINVAL; - clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); + freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; i2cmsg[1] = (pll >> 8) & 0xff; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 72e3fa6..f329485 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates * 0x688301b7 and the right one 0x688481b7. All other keys generate * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with - * reversed endianess. Extract direction from buffer, rotate endianess, + * reversed endianness. Extract direction from buffer, rotate endianness, * adjust sign and feed the values into stabilize(). The resulting codes * will be 0x01008000, 0x01007F00, which match the newer devices. */ diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 094484f..a5d4f88 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -118,7 +118,7 @@ static int debug; #define RR3_IR_IO_LENGTH_FUZZ 0x04 /* Timeout for end of signal detection */ #define RR3_IR_IO_SIG_TIMEOUT 0x05 -/* Minumum value for pause recognition. */ +/* Minimum value for pause recognition. */ #define RR3_IR_IO_MIN_PAUSE 0x06 /* Clock freq. of EZ-USB chip */ diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c index 2e1a02e..20cca40 100644 --- a/drivers/media/tuners/mt2063.c +++ b/drivers/media/tuners/mt2063.c @@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state, * DNC Output is selected, the other is always off) * * @state: ptr to mt2063_state structure - * @Mode: desired reciever delivery system + * @Mode: desired receiver delivery system * * Note: Register cache must be valid for it to work */ @@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe, /* * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. 
- * So, the amount of the needed bandwith is given by: + * So, the amount of the needed bandwidth is given by: * Bw = Symbol_rate * (1 + 0.15) * As such, the maximum symbol rate supported by 6 MHz is given by: * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h index 74dc46a..7e47987 100644 --- a/drivers/media/tuners/tuner-xc2028-types.h +++ b/drivers/media/tuners/tuner-xc2028-types.h @@ -119,7 +119,7 @@ #define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) #define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) -/* To preserve backward compatibilty, +/* To preserve backward compatibility, (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported */ diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e9d017b..528cce9 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1412,8 +1412,8 @@ err_v4l2: usb_set_intfdata(interface, NULL); err_if: usb_put_dev(udev); - kfree(dev); clear_bit(dev->devno, &cx231xx_devused); + kfree(dev); return retval; } diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index c8fcd78..8f9b2cea 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { u8 wbuf[MAX_XFER_SIZE]; u8 mbox = (reg >> 16) & 0xff; - struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; + struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL }; if (6 + len > sizeof(wbuf)) { dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", @@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, } else { /* I2C */ u8 buf[MAX_XFER_SIZE]; - struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), + struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len, buf, msg[1].len, msg[1].buf }; if (5 + msg[0].len > sizeof(buf)) { dev_warn(&d->udev->dev, "%s: i2c xfer: len=%d is too big!\n", KBUILD_MODNAME, msg[0].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[1].len; @@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, } else { /* I2C */ u8 buf[MAX_XFER_SIZE]; - struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, - 0, NULL }; + struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len, + buf, 0, NULL }; if (5 + msg[0].len > sizeof(buf)) { dev_warn(&d->udev->dev, "%s: i2c xfer: len=%d is too big!\n", KBUILD_MODNAME, msg[0].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; @@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } +unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) @@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = { /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 
2)", NULL) }, + { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, + &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 2627553..08240e4 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe) struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; int err; - /* exit if we didnt initialize the driver yet */ + /* exit if we didn't initialize the driver yet */ if (!state->chip_id) { mxl_debug("driver not yet initialized, exit."); goto fail; @@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe) struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; int err; - /* exit if we didnt initialize the driver yet */ + /* exit if we didn't initialize the driver yet */ if (!state->chip_id) { mxl_debug("driver not yet initialized, exit."); goto fail; diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 40832a1..98d24ae 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, if (rxlen > 62) { err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); - txlen = 62; + rxlen = 62; } b[0] = I2C_SPEED_100KHZ_BIT; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index fc5d60e..dd19c9f 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp) em28xx_videodbg("users=%d\n", dev->users); - mutex_lock(&dev->lock); vb2_fop_release(filp); + mutex_lock(&dev->lock); if (dev->users == 1) { /* the device is already disconnect, diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c index cb1e64c..cea8d7f 100644 --- a/drivers/media/usb/gspca/gl860/gl860.c +++ b/drivers/media/usb/gspca/gl860/gl860.c @@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, s32 nToSkip = sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); - /* Test only against 0202h, so endianess does not matter */ + /* Test only against 0202h, so endianness does not matter */ switch (*(s16 *) data) { case 0x0202: /* End of frame, start a new one */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c index cd79c18..07529e5 100644 --- a/drivers/media/usb/gspca/pac207.c +++ b/drivers/media/usb/gspca/pac207.c @@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) /* interrput packet length */ + int len) /* interrupt packet length */ { int ret = -EINVAL; diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c index a915096..2fd1c5e 100644 --- a/drivers/media/usb/gspca/pac7302.c +++ b/drivers/media/usb/gspca/pac7302.c @@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) /* interrput packet length */ + int len) /* interrupt packet 
length */ { int ret = -EINVAL; u8 data0, data1; diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c index 1fc80af..48234c9 100644 --- a/drivers/media/usb/gspca/stk1135.c +++ b/drivers/media/usb/gspca/stk1135.c @@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev) /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); + + /* wait a while for sensor to catch up */ + udelay(1000); } static void stk1135_camera_disable(struct gspca_dev *gspca_dev) diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c index 9c08276..7f94ec7 100644 --- a/drivers/media/usb/gspca/stv0680.c +++ b/drivers/media/usb/gspca/stv0680.c @@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev, struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; - /* Give the camera some time to settle, otherwise initalization will + /* Give the camera some time to settle, otherwise initialization will fail on hotplug, and yes it really needs a full second. */ msleep(1000); diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index a517d18..46c9f22 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, + {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)}, {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index 7b95d8e..d3e1b6d 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c @@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) /* interrput packet length */ + int len) /* interrupt packet length */ { if (len == 8 && data[4] == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 77bbf78..78c9bc8 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id /* Set the leds off */ pwc_set_leds(pdev, 0, 0); - /* Setup intial videomode */ + /* Setup initial videomode */ rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, V4L2_PIX_FMT_YUV420, 30, &compression, 1); if (rc) diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index 8a505a9..6222a4a 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c @@ -50,13 +50,8 @@ #define USBTV_ISOC_TRANSFERS 16 #define USBTV_ISOC_PACKETS 8 -#define USBTV_WIDTH 720 -#define USBTV_HEIGHT 480 - #define USBTV_CHUNK_SIZE 256 #define USBTV_CHUNK 240 -#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ - / 4 / USBTV_CHUNK) /* Chunk header. 
*/ #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ @@ -65,6 +60,27 @@ #define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) #define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) +#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL) + +/* parameters for supported TV norms */ +struct usbtv_norm_params { + v4l2_std_id norm; + int cap_width, cap_height; +}; + +static struct usbtv_norm_params norm_params[] = { + { + .norm = V4L2_STD_525_60, + .cap_width = 720, + .cap_height = 480, + }, + { + .norm = V4L2_STD_PAL, + .cap_width = 720, + .cap_height = 576, + } +}; + /* A single videobuf2 frame buffer. */ struct usbtv_buf { struct vb2_buffer vb; @@ -94,11 +110,38 @@ struct usbtv { USBTV_COMPOSITE_INPUT, USBTV_SVIDEO_INPUT, } input; + v4l2_std_id norm; + int width, height; + int n_chunks; int iso_size; unsigned int sequence; struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; }; +static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm) +{ + int i, ret = 0; + struct usbtv_norm_params *params = NULL; + + for (i = 0; i < ARRAY_SIZE(norm_params); i++) { + if (norm_params[i].norm & norm) { + params = &norm_params[i]; + break; + } + } + + if (params) { + usbtv->width = params->cap_width; + usbtv->height = params->cap_height; + usbtv->n_chunks = usbtv->width * usbtv->height + / 4 / USBTV_CHUNK; + usbtv->norm = params->norm; + } else + ret = -EINVAL; + + return ret; +} + static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) { int ret; @@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input) return ret; } +static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm) +{ + int ret; + static const u16 pal[][2] = { + { USBTV_BASE + 0x001a, 0x0068 }, + { USBTV_BASE + 0x010e, 0x0072 }, + { USBTV_BASE + 0x010f, 0x00a2 }, + { USBTV_BASE + 0x0112, 0x00b0 }, + { USBTV_BASE + 0x0117, 0x0001 }, + { USBTV_BASE + 0x0118, 0x002c }, + { USBTV_BASE + 0x012d, 0x0010 }, + { USBTV_BASE + 0x012f, 0x0020 }, + { USBTV_BASE + 0x024f, 0x0002 }, + { USBTV_BASE + 0x0254, 0x0059 }, + { USBTV_BASE + 0x025a, 0x0016 }, + { USBTV_BASE + 0x025b, 0x0035 }, + { USBTV_BASE + 0x0263, 0x0017 }, + { USBTV_BASE + 0x0266, 0x0016 }, + { USBTV_BASE + 0x0267, 0x0036 } + }; + + static const u16 ntsc[][2] = { + { USBTV_BASE + 0x001a, 0x0079 }, + { USBTV_BASE + 0x010e, 0x0068 }, + { USBTV_BASE + 0x010f, 0x009c }, + { USBTV_BASE + 0x0112, 0x00f0 }, + { USBTV_BASE + 0x0117, 0x0000 }, + { USBTV_BASE + 0x0118, 0x00fc }, + { USBTV_BASE + 0x012d, 0x0004 }, + { USBTV_BASE + 0x012f, 0x0008 }, + { USBTV_BASE + 0x024f, 0x0001 }, + { USBTV_BASE + 0x0254, 0x005f }, + { USBTV_BASE + 0x025a, 0x0012 }, + { USBTV_BASE + 0x025b, 0x0001 }, + { USBTV_BASE + 0x0263, 0x001c }, + { USBTV_BASE + 0x0266, 0x0011 }, + { USBTV_BASE + 0x0267, 0x0005 } + }; + + ret = usbtv_configure_for_norm(usbtv, norm); + + if (!ret) { + if (norm & V4L2_STD_525_60) + ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc)); + else if (norm & V4L2_STD_PAL) + ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal)); + } + + return ret; +} + static int usbtv_setup_capture(struct usbtv *usbtv) { int ret; @@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv) { USBTV_BASE + 0x0284, 0x0088 }, { USBTV_BASE + 0x0003, 0x0004 }, - { USBTV_BASE + 0x001a, 0x0079 }, { USBTV_BASE + 0x0100, 0x00d3 }, - { USBTV_BASE + 0x010e, 0x0068 }, - { USBTV_BASE + 0x010f, 0x009c }, - { USBTV_BASE + 0x0112, 0x00f0 }, { USBTV_BASE + 0x0115, 0x0015 }, - { USBTV_BASE + 0x0117, 0x0000 }, - { USBTV_BASE + 
0x0118, 0x00fc }, - { USBTV_BASE + 0x012d, 0x0004 }, - { USBTV_BASE + 0x012f, 0x0008 }, { USBTV_BASE + 0x0220, 0x002e }, { USBTV_BASE + 0x0225, 0x0008 }, { USBTV_BASE + 0x024e, 0x0002 }, - { USBTV_BASE + 0x024f, 0x0001 }, - { USBTV_BASE + 0x0254, 0x005f }, - { USBTV_BASE + 0x025a, 0x0012 }, - { USBTV_BASE + 0x025b, 0x0001 }, - { USBTV_BASE + 0x0263, 0x001c }, - { USBTV_BASE + 0x0266, 0x0011 }, - { USBTV_BASE + 0x0267, 0x0005 }, { USBTV_BASE + 0x024e, 0x0002 }, { USBTV_BASE + 0x024f, 0x0002 }, }; @@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv) if (ret) return ret; + ret = usbtv_select_norm(usbtv, usbtv->norm); + if (ret) + return ret; + ret = usbtv_select_input(usbtv, usbtv->input); if (ret) return ret; @@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) frame_id = USBTV_FRAME_ID(chunk); odd = USBTV_ODD(chunk); chunk_no = USBTV_CHUNK_NO(chunk); - if (chunk_no >= USBTV_CHUNKS) + if (chunk_no >= usbtv->n_chunks) return; /* Beginning of a frame. */ @@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) usbtv->chunks_done++; /* Last chunk in a frame, signalling an end */ - if (odd && chunk_no == USBTV_CHUNKS-1) { + if (odd && chunk_no == usbtv->n_chunks-1) { int size = vb2_plane_size(&buf->vb, 0); enum vb2_buffer_state state = usbtv->chunks_done == - USBTV_CHUNKS ? + usbtv->n_chunks ? VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR; @@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv, static int usbtv_enum_input(struct file *file, void *priv, struct v4l2_input *i) { + struct usbtv *dev = video_drvdata(file); + switch (i->index) { case USBTV_COMPOSITE_INPUT: strlcpy(i->name, "Composite", sizeof(i->name)); @@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv, } i->type = V4L2_INPUT_TYPE_CAMERA; - i->std = V4L2_STD_525_60; + i->std = dev->vdev.tvnorms; return 0; } @@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, static int usbtv_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { - f->fmt.pix.width = USBTV_WIDTH; - f->fmt.pix.height = USBTV_HEIGHT; + struct usbtv *usbtv = video_drvdata(file); + + f->fmt.pix.width = usbtv->width; + f->fmt.pix.height = usbtv->height; f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; f->fmt.pix.field = V4L2_FIELD_INTERLACED; - f->fmt.pix.bytesperline = USBTV_WIDTH * 2; + f->fmt.pix.bytesperline = usbtv->width * 2; f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - f->fmt.pix.priv = 0; + return 0; } static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) { - *norm = V4L2_STD_525_60; + struct usbtv *usbtv = video_drvdata(file); + *norm = usbtv->norm; return 0; } +static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) +{ + int ret = -EINVAL; + struct usbtv *usbtv = video_drvdata(file); + + if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL)) + ret = usbtv_select_norm(usbtv, norm); + + return ret; +} + static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) { struct usbtv *usbtv = video_drvdata(file); @@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i) return usbtv_select_input(usbtv, i); } -static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) -{ - if (norm & V4L2_STD_525_60) - return 0; - return -EINVAL; -} - struct v4l2_ioctl_ops usbtv_ioctl_ops = { .vidioc_querycap = usbtv_querycap, .vidioc_enum_input = 
usbtv_enum_input, @@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq, const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { + struct usbtv *usbtv = vb2_get_drv_priv(vq); + if (*nbuffers < 2) *nbuffers = 2; *nplanes = 1; - sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); + sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32); return 0; } @@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf, return -ENOMEM; usbtv->dev = dev; usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); + usbtv->iso_size = size; + + (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60); + spin_lock_init(&usbtv->buflock); mutex_init(&usbtv->v4l2_lock); mutex_init(&usbtv->vb2q_lock); @@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf, usbtv->vdev.release = video_device_release_empty; usbtv->vdev.fops = &usbtv_fops; usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; - usbtv->vdev.tvnorms = V4L2_STD_525_60; + usbtv->vdev.tvnorms = USBTV_TV_STD; usbtv->vdev.queue = &usbtv->vb2q; usbtv->vdev.lock = &usbtv->v4l2_lock; set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 899cb6d..898c208 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample) * * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) * - * to avoid loosing precision in the division. Similarly, the host timestamp is + * to avoid losing precision in the division. Similarly, the host timestamp is * computed with * * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 60dcc0f..fb46790 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id) "Advanced Simple", "Core", "Simple Scalable", - "Advanced Coding Efficency", + "Advanced Coding Efficiency", NULL, }; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index b19b306..0edc165 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) } /** + * __setup_lengths() - setup initial lengths for every plane in + * every buffer on the queue + */ +static void __setup_lengths(struct vb2_queue *q, unsigned int n) +{ + unsigned int buffer, plane; + struct vb2_buffer *vb; + + for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { + vb = q->bufs[buffer]; + if (!vb) + continue; + + for (plane = 0; plane < vb->num_planes; ++plane) + vb->v4l2_planes[plane].length = q->plane_sizes[plane]; + } +} + +/** * __setup_offsets() - setup unique offsets ("cookies") for every plane in * every buffer on the queue */ @@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) continue; for (plane = 0; plane < vb->num_planes; ++plane) { - vb->v4l2_planes[plane].length = q->plane_sizes[plane]; vb->v4l2_planes[plane].m.mem_offset = off; dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", @@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, q->bufs[q->num_buffers + buffer] = vb; } + __setup_lengths(q, buffer); if (memory == 
V4L2_MEMORY_MMAP) __setup_offsets(q, buffer); @@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) return -EINVAL; } - if (eb->flags & ~O_CLOEXEC) { - dprintk(1, "Queue does support only O_CLOEXEC flag\n"); + if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) { + dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n"); return -EINVAL; } @@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) vb_plane = &vb->planes[eb->plane]; - dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); + dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE); if (IS_ERR_OR_NULL(dbuf)) { dprintk(1, "Failed to export buffer %d, plane %d\n", eb->index, eb->plane); return -EINVAL; } - ret = dma_buf_fd(dbuf, eb->flags); + ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE); if (ret < 0) { dprintk(3, "buffer %d, plane %d failed to export (%d)\n", eb->index, eb->plane, ret); diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 646f08f..33d3871d 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) return sgt; } -static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags) { struct vb2_dc_buf *buf = buf_priv; struct dma_buf *dbuf; @@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) if (WARN_ON(!buf->sgt_base)) return NULL; - dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); + dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); if (IS_ERR(dbuf)) return NULL; diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 2f86054..0d3a8ff 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if (!buf->pages) - return NULL; + goto userptr_fail_alloc_pages; num_pages_from_user = get_user_pages(current, current->mm, vaddr & PAGE_MASK, @@ -204,6 +204,7 @@ userptr_fail_get_user_pages: while (--num_pages_from_user >= 0) put_page(buf->pages[num_pages_from_user]); kfree(buf->pages); +userptr_fail_alloc_pages: kfree(buf); return NULL; } diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index 34c18fb..54cc255 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = { int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) { - return regmap_read(sec_pmic->regmap, reg, dest); + return regmap_read(sec_pmic->regmap_pmic, reg, dest); } EXPORT_SYMBOL_GPL(sec_reg_read); int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) { - return regmap_bulk_read(sec_pmic->regmap, reg, buf, count); + return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count); } EXPORT_SYMBOL_GPL(sec_bulk_read); int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) { - return regmap_write(sec_pmic->regmap, reg, value); + return regmap_write(sec_pmic->regmap_pmic, reg, value); } EXPORT_SYMBOL_GPL(sec_reg_write); int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) { - return regmap_raw_write(sec_pmic->regmap, reg, buf, count); + return 
regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count); } EXPORT_SYMBOL_GPL(sec_bulk_write); int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) { - return regmap_update_bits(sec_pmic->regmap, reg, mask, val); + return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val); } EXPORT_SYMBOL_GPL(sec_reg_update); @@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = { .cache_type = REGCACHE_FLAT, }; +static const struct regmap_config sec_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + #ifdef CONFIG_OF /* * Only the common platform data elements for s5m8767 are parsed here from the @@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c, break; } - sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap); - if (IS_ERR(sec_pmic->regmap)) { - ret = PTR_ERR(sec_pmic->regmap); + sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap); + if (IS_ERR(sec_pmic->regmap_pmic)) { + ret = PTR_ERR(sec_pmic->regmap_pmic); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); return ret; @@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c, sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); i2c_set_clientdata(sec_pmic->rtc, sec_pmic); + sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, + &sec_rtc_regmap_config); + if (IS_ERR(sec_pmic->regmap_rtc)) { + ret = PTR_ERR(sec_pmic->regmap_rtc); + dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n", + ret); + return ret; + } + if (pdata && pdata->cfg_pmic_irq) pdata->cfg_pmic_irq(); diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c index 0dd84e9..b441b1b 100644 --- a/drivers/mfd/sec-irq.c +++ b/drivers/mfd/sec-irq.c @@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) switch (type) { case S5M8763X: - ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s5m8763_irq_chip, &sec_pmic->irq_data); break; case S5M8767X: - ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s5m8767_irq_chip, &sec_pmic->irq_data); break; case S2MPS11X: - ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s2mps11_irq_chip, &sec_pmic->irq_data); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 4cabdc9..4b3aaa8 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) { struct platform_device *pdev = info->pdev; - if (use_dma) { + if (info->use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_coherent(&pdev->dev, info->buf_size, info->data_buff, info->data_buff_phys); @@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = { .compatible = "marvell,pxa3xx-nand", .data = (void *)PXA3XX_NAND_VARIANT_PXA, }, - { - .compatible = "marvell,armada370-nand", - .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, - }, {} }; MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 36eab0c..398e299 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ 
-4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params) (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { /* not complete check, but should be good enough to catch mistakes */ - __be32 ip = in_aton(arp_ip_target[i]); - if (!isdigit(arp_ip_target[i][0]) || ip == 0 || - ip == htonl(INADDR_BROADCAST)) { + __be32 ip; + if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || + IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[i]); arp_interval = 0; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index abf5e10..0ae580b 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1635,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d, char *buf) { struct bonding *bond = to_bond(d); - int packets_per_slave = bond->params.packets_per_slave; + unsigned int packets_per_slave = bond->params.packets_per_slave; if (packets_per_slave > 1) packets_per_slave = reciprocal_value(packets_per_slave); - return sprintf(buf, "%d\n", packets_per_slave); + return sprintf(buf, "%u\n", packets_per_slave); } static ssize_t bonding_store_packets_per_slave(struct device *d, diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5f9a7ad..8aeec0b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev) usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); + usb_free_urb(urb); break; } @@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne * allowed (MAX_TX_URBS). 
*/ if (!context) { - usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); + usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context\n"); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 8ee9d15..263dd92 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) /* set LED in default state (end of init phase) */ pcan_usb_pro_set_led(dev, 0, 1); + kfree(bi); + kfree(fi); + return 0; err_out: diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 50b853a..46dfb13 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev) if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); - if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, - 0, dev->name, dev)) + if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev)) return -EAGAIN; /* Initialize EMAC board */ @@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev) emac_shutdown(ndev); + free_irq(ndev->irq, ndev); + return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 0216d59..2e46c28 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + if (!IS_SRIOV(bp)) { + BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", num_vfs_param, BNX2X_NR_VIRTFN(bp)); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 369b736..f3dd93b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp) void (*write_op)(struct tg3 *, u32, u32); int i, err; + if (!pci_device_is_present(tp->pdev)) + return -ENODEV; + tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -11581,10 +11584,11 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down_prepare(tp); - - tg3_carrier_off(tp); + if (pci_device_is_present(tp->pdev)) { + tg3_power_down_prepare(tp); + tg3_carrier_off(tp); + } return 0; } @@ -16499,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Clear this out for sanity. */ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. 
*/ + tw32(TG3PCI_REG_BASE_ADDR, 0); + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && @@ -17726,10 +17733,12 @@ static int tg3_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; tg3_reset_task_cancel(tp); tg3_phy_stop(tp); @@ -17771,6 +17780,8 @@ out: tg3_phy_start(tp); } +unlock: + rtnl_unlock(); return err; } @@ -17779,10 +17790,12 @@ static int tg3_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; netif_device_attach(dev); @@ -17806,6 +17819,8 @@ out: if (!err) tg3_phy_start(tp); +unlock: + rtnl_unlock(); return err; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecd2fb3..6c93088 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -49,13 +49,15 @@ #include <asm/io.h> #include "cxgb4_uld.h" -#define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 4 -#define FW_VERSION_MICRO 0 +#define T4FW_VERSION_MAJOR 0x01 +#define T4FW_VERSION_MINOR 0x06 +#define T4FW_VERSION_MICRO 0x18 +#define T4FW_VERSION_BUILD 0x00 -#define FW_VERSION_MAJOR_T5 0 -#define FW_VERSION_MINOR_T5 0 -#define FW_VERSION_MICRO_T5 0 +#define T5FW_VERSION_MAJOR 0x01 +#define T5FW_VERSION_MINOR 0x08 +#define T5FW_VERSION_MICRO 0x1C +#define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) 
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -240,6 +242,26 @@ struct pci_params { unsigned char width; }; +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + struct adapter_params { struct tp_params tp; struct vpd_params vpd; @@ -259,7 +281,7 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; - unsigned char rev; /* chip revision */ + enum chip_type chip; /* chip code */ unsigned char offload; unsigned char bypass; @@ -267,6 +289,23 @@ struct adapter_params { unsigned int ofldq_wr_cred; }; +#include "t4fw_api.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + + struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -512,25 +551,6 @@ struct sge { struct l2t_data; -#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) -#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) - -#define CHELSIO_T4 0x4 -#define CHELSIO_T5 0x5 - -enum chip_type { - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), - T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, - - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, - T5_LAST_REV = T5_A1, -}; - #ifdef CONFIG_PCI_IOV /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. 
However, the Serial @@ -715,12 +735,12 @@ enum { static inline int is_t5(enum chip_type chip) { - return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; } static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) @@ -900,7 +920,11 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); -int t4_check_fw_version(struct adapter *adapter); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8b929ee..d6b12e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { { 0, } }; -#define FW_FNAME "cxgb4/t4fw.bin" +#define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" -#define FW_CFNAME "cxgb4/t4-config.txt" +#define FW4_CFNAME "cxgb4/t4-config.txt" #define FW5_CFNAME "cxgb4/t5-config.txt" MODULE_DESCRIPTION(DRV_DESC); @@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); -MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); /* @@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap); } /* - * Returns 0 if new FW was successfully loaded, a positive errno if a load was - * started but failed, and a negative errno if flash load couldn't start. - */ -static int upgrade_fw(struct adapter *adap) -{ - int ret; - u32 vers, exp_major; - const struct fw_hdr *hdr; - const struct firmware *fw; - struct device *dev = adap->pdev_dev; - char *fw_file_name; - - switch (CHELSIO_CHIP_VERSION(adap->chip)) { - case CHELSIO_T4: - fw_file_name = FW_FNAME; - exp_major = FW_VERSION_MAJOR; - break; - case CHELSIO_T5: - fw_file_name = FW5_FNAME; - exp_major = FW_VERSION_MAJOR_T5; - break; - default: - dev_err(dev, "Unsupported chip type, %x\n", adap->chip); - return -EINVAL; - } - - ret = request_firmware(&fw, fw_file_name, dev); - if (ret < 0) { - dev_err(dev, "unable to load firmware image %s, error %d\n", - fw_file_name, ret); - return ret; - } - - hdr = (const struct fw_hdr *)fw->data; - vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { - ret = -EINVAL; /* wrong major version, won't do */ - goto out; - } - - /* - * If the flash FW is unusable or we found something newer, load it. 
- */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || - vers > adap->params.fw_vers) { - dev_info(dev, "upgrading firmware ...\n"); - ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, - /*force=*/false); - if (!ret) - dev_info(dev, - "firmware upgraded to version %pI4 from %s\n", - &hdr->fw_ver, fw_file_name); - else - dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); - } else { - /* - * Tell our caller that we didn't upgrade the firmware. - */ - ret = -EINVAL; - } - -out: release_firmware(fw); - return ret; -} - -/* * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. * The allocated memory is cleared. */ @@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset) static int get_regs_len(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) return T4_REGMAP_SIZE; else return T5_REGMAP_SIZE; @@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); val2 = t4_read_reg(adapter, SGE_STAT_MATCH); @@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return CHELSIO_CHIP_VERSION(ap->chip) | - (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, static const unsigned int *reg_ranges; int arr_size = 0, buf_size = 0; - if (is_t4(ap->chip)) { + if (is_t4(ap->params.chip)) { reg_ranges = &t4_reg_ranges[0]; arr_size = ARRAY_SIZE(t4_reg_ranges); buf_size = T4_REGMAP_SIZE; @@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap) size = t4_read_reg(adap, MA_EDRAM1_BAR); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); } - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); if (i & EXT_MEM_ENABLE) add_debugfs_mem(adap, "mc", MEM_MC, @@ -3419,7 +3353,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3588,7 +3522,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs) do { v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3708,7 +3642,7 @@ static void process_db_drop(struct work_struct *work) adap = container_of(work, struct adapter, db_drop_task); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { disable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); drain_db_fifo(adap, 1); @@ -3753,7 +3687,7 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { - 
if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); @@ -3762,7 +3696,7 @@ void t4_db_full(struct adapter *adap) void t4_db_dropped(struct adapter *adap) { - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) queue_work(workq, &adap->db_drop_task); } @@ -3789,7 +3723,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; - lli.adapter_type = adap->params.rev; + lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> @@ -4483,7 +4417,7 @@ static void setup_memwin(struct adapter *adap) u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mem_win0_base = bar0 + MEMWIN0_BASE; mem_win1_base = bar0 + MEMWIN1_BASE; mem_win2_base = bar0 + MEMWIN2_BASE; @@ -4668,8 +4602,10 @@ static int adap_init0_config(struct adapter *adapter, int reset) const struct firmware *cf; unsigned long mtype = 0, maddr = 0; u32 finiver, finicsum, cfcsum; - int ret, using_flash; + int ret; + int config_issued = 0; char *fw_config_file, fw_config_file_path[256]; + char *config_name = NULL; /* * Reset device if necessary. @@ -4686,9 +4622,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * then use that. Otherwise, use the configuration file stored * in the adapter flash ... */ - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { case CHELSIO_T4: - fw_config_file = FW_CFNAME; + fw_config_file = FW4_CFNAME; break; case CHELSIO_T5: fw_config_file = FW5_CFNAME; @@ -4702,13 +4638,16 @@ static int adap_init0_config(struct adapter *adapter, int reset) ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); if (ret < 0) { - using_flash = 1; + config_name = "On FLASH"; mtype = FW_MEMTYPE_CF_FLASH; maddr = t4_flash_cfg_addr(adapter); } else { u32 params[7], val[7]; - using_flash = 0; + sprintf(fw_config_file_path, + "/lib/firmware/%s", fw_config_file); + config_name = fw_config_file_path; + if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { @@ -4776,6 +4715,26 @@ static int adap_init0_config(struct adapter *adapter, int reset) FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); + + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. 
+ */ + if (ret == -ENOENT) { + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + config_name = "Firmware Default"; + } + + config_issued = 1; if (ret < 0) goto bye; @@ -4816,7 +4775,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; - sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); /* * Return successfully and note that we're operating with parameters * not supplied by the driver, rather than from hard-wired @@ -4824,11 +4782,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ adapter->flags |= USING_SOFT_PARAMS; dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ - "Configuration File %s, version %#x, computed checksum %#x\n", - (using_flash - ? "in device FLASH" - : fw_config_file_path), - finiver, cfcsum); + "Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); return 0; /* @@ -4837,9 +4792,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * want to issue a warning since this is fairly common.) */ bye: - if (ret != -ENOENT) - dev_warn(adapter->pdev_dev, "Configuration file error %d\n", - -ret); + if (config_issued && ret != -ENOENT) + dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", + config_name, -ret); return ret; } @@ -5086,6 +5041,47 @@ bye: return ret; } +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T4, + .fs_name = FW4_CFNAME, + .fw_mod_name = FW4_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T4, + .fw_ver = __cpu_to_be32(FW_VERSION(T4)), + .intfver_nic = FW_INTFVER(T4, NIC), + .intfver_vnic = FW_INTFVER(T4, VNIC), + .intfver_ri = FW_INTFVER(T4, RI), + .intfver_iscsi = FW_INTFVER(T4, ISCSI), + .intfver_fcoe = FW_INTFVER(T4, FCOE), + }, + }, { + .chip = CHELSIO_T5, + .fs_name = FW5_CFNAME, + .fw_mod_name = FW5_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. */ @@ -5123,44 +5119,54 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - ret = t4_check_fw_version(adap); - - /* The error code -EFAULT is returned by t4_check_fw_version() if - * firmware on adapter < supported firmware. If firmware on adapter - * is too old (not supported by driver) and we're the MASTER_PF set - * adapter state to DEV_STATE_UNINIT to force firmware upgrade - * and reinitialization. 
- */ - if ((adap->flags & MASTER_PF) && ret == -EFAULT) - state = DEV_STATE_UNINIT; + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { - if (ret == -EINVAL || ret == -EFAULT || ret > 0) { - if (upgrade_fw(adap) >= 0) { - /* - * Note that the chip was reset as part of the - * firmware upgrade so we don't reset it again - * below and grab the new firmware version. - */ - reset = 0; - ret = t4_check_fw_version(adap); - } else - if (ret == -EFAULT) { - /* - * Firmware is old but still might - * work if we force reinitialization - * of the adapter. Ignoring FW upgrade - * failure. - */ - dev_warn(adap->pdev_dev, - "Ignoring firmware upgrade " - "failure, and forcing driver " - "to reinitialize the " - "adapter.\n"); - ret = 0; - } + struct fw_info *fw_info; + struct fw_hdr *card_fw; + const struct firmware *fw; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); + if (fw_info == NULL) { + dev_err(adap->pdev_dev, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; } + + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = t4_alloc_mem(sizeof(*card_fw)); + + /* Get FW from from /lib/firmware/ */ + ret = request_firmware(&fw, fw_info->fw_mod_name, + adap->pdev_dev); + if (ret < 0) { + dev_err(adap->pdev_dev, + "unable to load firmware image %s, error %d\n", + fw_info->fw_mod_name, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, + state, &reset); + + /* Cleaning up */ + if (fw != NULL) + release_firmware(fw); + t4_free_mem(card_fw); + if (ret < 0) - return ret; + goto bye; } /* @@ -5245,7 +5251,7 @@ static int adap_init0(struct adapter *adap) if (ret == -ENOENT) { dev_info(adap->pdev_dev, "No Configuration File present " - "on adapter. Using hard-wired " + "on adapter. Using hard-wired " "configuration parameters.\n"); ret = adap_init0_no_config(adap, reset); } @@ -5787,7 +5793,7 @@ static void print_port_info(const struct net_device *dev) netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", adap->params.vpd.id, - CHELSIO_CHIP_RELEASE(adap->params.rev), buf, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? 
" MSI" : ""); @@ -5910,7 +5916,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_unmap_bar0; - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { s_qpp = QUEUESPERPAGEPF1 * adapter->fn; qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); @@ -6064,7 +6070,7 @@ sriov: out_free_dev: free_some_resources(adapter); out_unmap_bar: - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); out_unmap_bar0: iounmap(adapter->regs); @@ -6116,7 +6122,7 @@ static void remove_one(struct pci_dev *pdev) free_some_resources(adapter); iounmap(adapter->regs); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ac311f5..cc380c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) u32 val; if (q->pend_cred >= 8) { val = PIDX(q->pend_cred / 8); - if (!is_t4(adap->chip)) + if (!is_t4(adap->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | @@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(q->cntxt_id) | PIDX(n)); } else { @@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap, return 0; } - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) __skb_pull(skb, sizeof(struct cpl_trace_pkt)); else __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); @@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_rx_pkt *pkt; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); struct sge *s = &q->adap->sge; - int cpl_trace_pkt = is_t4(q->adap->chip) ? + int cpl_trace_pkt = is_t4(q->adap->params.chip) ? CPL_TRACE_PKT : CPL_TRACE_PKT_T5; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) @@ -2182,7 +2182,7 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->chip)) { + if (!is_t4(adap->params.chip)) { unsigned int s_qpp; unsigned short udb_density; unsigned long qpshift; @@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap) * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * and generate an interrupt when this occurs so we can recover. 
*/ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, V_HP_INT_THRESH(M_HP_INT_THRESH) | V_LP_INT_THRESH(M_LP_INT_THRESH), diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4cbb2f9..74a6fce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; u32 mc_bist_status_rdata, mc_bist_data_pattern; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mc_bist_cmd = MC_BIST_CMD; mc_bist_cmd_addr = MC_BIST_CMD_ADDR; mc_bist_cmd_len = MC_BIST_CMD_LEN; @@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); @@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) { int i; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* * Setup offset into PCIE memory window. Address must be a @@ -863,104 +863,169 @@ unlock: } /** - * get_fw_version - read the firmware version + * t4_get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ -static int get_fw_version(struct adapter *adapter, u32 *vers) +int t4_get_fw_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, fw_ver), 1, vers, 0); + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); } /** - * get_tp_version - read the TP microcode version + * t4_get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ -static int get_tp_version(struct adapter *adapter, u32 *vers) +int t4_get_tp_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + + return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } -/** - * t4_check_fw_version - check if the FW is compatible with this driver - * @adapter: the adapter - * - * Checks if an adapter's FW is compatible with the driver. Returns 0 - * if there's exact match, a negative error if the version could not be - * read or there's a major version mismatch, and a positive value if the - * expected major version is found but there's a minor version mismatch. +/* Is the given firmware API compatible with the one the driver was compiled + * with? 
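[Editor's note] A large share of the cxgb4/cxgb4vf hunks in this patch simply switch is_t4() callers from adap->chip to the new adap->params.chip field, and the reworked firmware handling keys off packed chip-code and version words. The sketch below is an editorial orientation aid, not part of the patch; it mirrors CHELSIO_CHIP_CODE()/CHELSIO_CHIP_VERSION()/CHELSIO_CHIP_RELEASE() from t4vf_common.h and FW_HDR_FW_VER_MAJOR/MINOR/MICRO_GET() from t4fw_api.h further down in this diff, and it assumes the BUILD accessor takes the low byte.

/* Editorial sketch, not part of the patch. */
#define SKETCH_CHIP_CODE(version, revision)	(((version) << 4) | (revision))
#define SKETCH_CHIP_VERSION(code)		(((code) >> 4) & 0xf)
#define SKETCH_CHIP_RELEASE(code)		((code) & 0xf)

static inline int sketch_is_t4(unsigned int chip)
{
	/* The reworked is_t4() compares only the version nibble, so every
	 * T4 revision (A1, A2, ...) matches and no T5 revision does.
	 */
	return SKETCH_CHIP_VERSION(chip) == 0x4;
}

static inline void sketch_decode_fw_ver(unsigned int fw_ver, unsigned int *major,
					unsigned int *minor, unsigned int *micro,
					unsigned int *build)
{
	*major = (fw_ver >> 24) & 0xff;		/* e.g. 0x01060200 -> 1.6.2.0 */
	*minor = (fw_ver >> 16) & 0xff;
	*micro = (fw_ver >> 8) & 0xff;
	*build = fw_ver & 0xff;			/* assumed: low byte */
}

With these decodings in mind, fw_compatible(), should_install_fs_fw() and t4_prep_fw() later in this diff are essentially comparisons of such packed words (plus the per-interface versions) across three headers: the one the driver was compiled against, the one read from the card's flash, and any image found under /lib/firmware.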
*/ -int t4_check_fw_version(struct adapter *adapter) +static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) { - u32 api_vers[2]; - int ret, major, minor, micro; - int exp_major, exp_minor, exp_micro; - ret = get_fw_version(adapter, &adapter->params.fw_vers); - if (!ret) - ret = get_tp_version(adapter, &adapter->params.tp_vers); - if (!ret) - ret = t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, intfver_nic), - 2, api_vers, 1); - if (ret) - return ret; + /* short circuit if it's the exact same firmware version */ + if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) + return 1; - major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); - minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); - micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); +#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) + if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && + SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) + return 1; +#undef SAME_INTF - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { - case CHELSIO_T4: - exp_major = FW_VERSION_MAJOR; - exp_minor = FW_VERSION_MINOR; - exp_micro = FW_VERSION_MICRO; - break; - case CHELSIO_T5: - exp_major = FW_VERSION_MAJOR_T5; - exp_minor = FW_VERSION_MINOR_T5; - exp_micro = FW_VERSION_MICRO_T5; - break; - default: - dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", - adapter->chip); - return -EINVAL; - } + return 0; +} - memcpy(adapter->params.api_vers, api_vers, - sizeof(adapter->params.api_vers)); +/* The firmware in the filesystem is usable, but should it be installed? + * This routine explains itself in detail if it indicates the filesystem + * firmware should be installed. + */ +static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, + int k, int c) +{ + const char *reason; - if (major < exp_major || (major == exp_major && minor < exp_minor) || - (major == exp_major && minor == exp_minor && micro < exp_micro)) { - dev_err(adapter->pdev_dev, - "Card has firmware version %u.%u.%u, minimum " - "supported firmware is %u.%u.%u.\n", major, minor, - micro, exp_major, exp_minor, exp_micro); - return -EFAULT; + if (!card_fw_usable) { + reason = "incompatible or unusable"; + goto install; } - if (major != exp_major) { /* major mismatch - fail */ - dev_err(adapter->pdev_dev, - "card FW has major version %u, driver wants %u\n", - major, exp_major); - return -EINVAL; + if (k > c) { + reason = "older than the version supported with this driver"; + goto install; } - if (minor == exp_minor && micro == exp_micro) - return 0; /* perfect match */ + return 0; + +install: + dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " + "installing firmware %u.%u.%u.%u on card.\n", + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); - /* Minor/micro version mismatch. Report it but often it's OK. 
*/ return 1; } +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, + int *reset) +{ + int ret, card_fw_usable, fs_fw_usable; + const struct fw_hdr *fs_fw; + const struct fw_hdr *drv_fw; + + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ + ret = -t4_read_flash(adap, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { + card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); + } else { + dev_err(adap->pdev_dev, + "Unable to read card's firmware header: %d\n", ret); + card_fw_usable = 0; + } + + if (fw_data != NULL) { + fs_fw = (const void *)fw_data; + fs_fw_usable = fw_compatible(drv_fw, fs_fw); + } else { + fs_fw = NULL; + fs_fw_usable = 0; + } + + if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && + (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { + /* Common case: the firmware on the card is an exact match and + * the filesystem one is an exact match too, or the filesystem + * one is absent/incompatible. + */ + } else if (fs_fw_usable && state == DEV_STATE_UNINIT && + should_install_fs_fw(adap, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { + ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); + if (ret != 0) { + dev_err(adap->pdev_dev, + "failed to install firmware: %d\n", ret); + goto bye; + } + + /* Installed successfully, update the cached header too. */ + memcpy(card_fw, fs_fw, sizeof(*card_fw)); + card_fw_usable = 1; + *reset = 0; /* already reset as part of load_fw */ + } + + if (!card_fw_usable) { + uint32_t d, c, k; + + d = be32_to_cpu(drv_fw->fw_ver); + c = be32_to_cpu(card_fw->fw_ver); + k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; + + dev_err(adap->pdev_dev, "Cannot find a usable firmware: " + "chip state %d, " + "driver compiled with %d.%d.%d.%d, " + "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", + state, + FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), + FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + ret = EINVAL; + goto bye; + } + + /* We're using whatever's on the card and it's known to be good. */ + adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); + adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); + +bye: + return ret; +} + /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter @@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter) PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, - is_t4(adapter->chip) ? + is_t4(adapter->params.chip) ? pcie_intr_info : t5_pcie_intr_info); if (fat) @@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port) { u32 v, int_cause_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); else int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); @@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) #define GET_STAT(name) \ t4_read_reg64(adap, \ - (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ + (is_t4(adap->params.chip) ? 
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) @@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, { u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); @@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, int i; u32 port_cfg_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); else port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); @@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, return -EINVAL; #define EPIO_REG(name) \ - (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ + (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ T5_PORT_REG(port, MAC_PORT_EPIO_##name)) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); @@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) { int i, off; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* Align on a 2KB boundary. */ @@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, int i, ret; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p; - unsigned int max_naddr = is_t4(adap->chip) ? + unsigned int max_naddr = is_t4(adap->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; - unsigned int max_mac_addr = is_t4(adap->chip) ? + unsigned int max_mac_addr = is_t4(adap->params.chip) ? 
NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter) { int ret, ver; uint16_t device_id; + u32 pl_rev; ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); - adapter->params.rev = t4_read_reg(adapter, PL_REV); + pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); ret = get_flash_params(adapter); if (ret < 0) { @@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter) */ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); ver = device_id >> 12; + adapter->params.chip = 0; switch (ver) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", @@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter) return -EINVAL; } - /* Reassign the updated revision field */ - adapter->params.rev = adapter->chip; - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index ef146c0..0a8205d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1092,6 +1092,11 @@ #define PL_REV 0x1943c +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) + #define LE_DB_CONFIG 0x19c04 #define HASHEN 0x00100000U @@ -1199,4 +1204,13 @@ #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) +#define A_PL_VF_REV 0x4 +#define A_PL_VF_WHOAMI 0x0 +#define A_PL_VF_REVISION 0x8 + +#define S_CHIPID 4 +#define M_CHIPID 0xfU +#define V_CHIPID(x) ((x) << S_CHIPID) +#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6f77ac4..74fea74 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2157,7 +2157,7 @@ struct fw_debug_cmd { struct fw_hdr { u8 ver; - u8 reserved1; + u8 chip; /* terminator chip type */ __be16 len512; /* bin length in units of 512-bytes */ __be32 fw_ver; /* firmware version */ __be32 tp_microcode_ver; @@ -2176,6 +2176,11 @@ struct fw_hdr { __be32 reserved6[23]; }; +enum fw_hdr_chip { + FW_HDR_CHIP_T4, + FW_HDR_CHIP_T5 +}; + #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index be5c7ef..68eaa9c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -344,7 +344,6 @@ struct adapter { unsigned long registered_device_map; unsigned long open_device_map; unsigned long flags; - enum chip_type chip; struct adapter_params params; /* queue and interrupt resources */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5f90ec5..0899c09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) /* * Chip version 4, revision 0x3f (cxgb4vf). */ - return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); + return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); } /* @@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev, reg_block_dump(adapter, regbuf, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); + + /* T5 adds new registers in the PL Register map. + */ reg_block_dump(adapter, regbuf, T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, - T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); + T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) + ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); reg_block_dump(adapter, regbuf, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); @@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; + unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter) return err; } + adapter->params.chip = 0; switch (adapter->pdev->device >> 12) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8475c4c..0a89963 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 53cbfed..6136245 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -39,21 +39,28 @@ #include "../cxgb4/t4fw_api.h" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) +/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. or + * = "a" for T4 FPGA; "b" for T4 FPGA, etc. 
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + */ #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 enum chip_type { - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, + T4_LAST_REV = T4_A2, - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, }; @@ -203,6 +210,7 @@ struct adapter_params { struct vpd_params vpd; /* Vital Product Data */ struct rss_params rss; /* Receive Side Scaling */ struct vf_resources vfres; /* Virtual Function Resource limits */ + enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ }; @@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } int t4vf_wait_dev_ready(struct adapter *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9f96dc3..d958c44 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? 
NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e21621..dc88782 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -64,6 +64,9 @@ #define SLIPORT_ERROR_NO_RESOURCE1 0x2 #define SLIPORT_ERROR_NO_RESOURCE2 0x9 +#define SLIPORT_ERROR_FW_RESET1 0x2 +#define SLIPORT_ERROR_FW_RESET2 0x0 + /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fee64bf..0fde69d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter) */ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->hw_error = true; - dev_err(&adapter->pdev->dev, - "Error detected in the card\n"); + /* Do not log error messages if its a FW reset */ + if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && + sliport_err2 == SLIPORT_ERROR_FW_RESET2) { + dev_info(&adapter->pdev->dev, + "Firmware update in progress\n"); + return; + } else { + dev_err(&adapter->pdev->dev, + "Error detected in the card\n"); + } } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { @@ -2932,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter) } } -static int be_clear(struct be_adapter *adapter) +static void be_mac_clear(struct be_adapter *adapter) { int i; + if (adapter->pmac_id) { + for (i = 0; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + } +} + +static int be_clear(struct be_adapter *adapter) +{ be_cancel_worker(adapter); if (sriov_enabled(adapter)) be_vf_clear(adapter); /* delete the primary mac along with the uc-mac list */ - for (i = 0; i < (adapter->uc_macs + 1); i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); - adapter->uc_macs = 0; + be_mac_clear(adapter); be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_clear_queues(adapter); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - be_msix_disable(adapter); return 0; } @@ -3812,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter, } if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, + "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { @@ -4363,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter) goto err; } - dev_err(dev, "Error recovery successful\n"); + dev_err(dev, "Adapter recovery successful\n"); return 0; err: if (status == -EAGAIN) dev_err(dev, "Waiting for resource provisioning\n"); else - dev_err(dev, "Error recovery failed\n"); + dev_err(dev, "Adapter recovery failed\n"); return status; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4cbebf3..e7c8b74 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev); * detected as not set during a prior frame transmission, then the * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs * were added to the ring and the 
ENET_TDAR[TDAR] bit is set. This results in - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously - * detected as not set during a prior frame transmission, then the - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in * frames not being transmitted until there is a 0-to-1 transition on * ENET_TDAR[TDAR]. */ @@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * data. */ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); + skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { bdp->cbd_bufaddr = 0; fep->tx_skbuff[index] = NULL; @@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev) else index = bdp - fep->tx_bd_base; - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - skb = fep->tx_skbuff[index]; + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, + DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; /* Check for errors. */ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 2d1c6bd..7628e0f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, dev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | + dev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index be15938..12b0932 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; + if (!vsi->tx_rings) + return stats; + rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c4c4fe3..ad2b74d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
*/ - udelay(usec_interval); + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b8e232b..d5f0d72 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, dev_kfree_skb_any(skb); dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); } if (rx_done) @@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5789ea2..01fc651 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2635,6 +2635,8 @@ static int __init mlx4_init(void) return -ENOMEM; ret = pci_register_driver(&mlx4_driver); + if (ret < 0) + destroy_workqueue(mlx4_wq); return ret < 0 ? ret : 0; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 2d045be..1e8b951 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - int result; - memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); + int result, count; + + count = nv_get_sset_count(dev, ETH_SS_TEST); + memset(buffer, 0, count * sizeof(u64)); if (!nv_link_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; @@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 return; } - if (!nv_loopback_test(dev)) { + if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; buffer[3] = 1; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b1cb0ff..6055d39 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data) qlcnic_83xx_poll_process_aen(adapter); - if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - ahw->diag_cnt++; + if (ahw->diag_test) { + if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) + ahw->diag_cnt++; qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); return IRQ_HANDLED; } @@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, } if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { - /* disable and free mailbox interrupt */ - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { - qlcnic_83xx_enable_mbx_poll(adapter); - qlcnic_83xx_free_mbx_intr(adapter); - } adapter->ahw->loopback_state = 0; adapter->ahw->hw_ops->setup_link_event(adapter, 1); } @@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; - int ring, err; + int ring; 
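[Editor's note] The fec_main.c and mvneta.c hunks above are the same class of fix: dma_unmap_single() must be passed the length that was handed to dma_map_single(), whether that is the skb's real length on the transmit path or the full receive buffer size on the receive path. A minimal sketch of the corrected transmit-side pairing, with hypothetical helper names, might look like this:

/* Editorial sketch, not from the patch: map with the skb's real length and
 * unmap with that same length, rather than the ring's maximum buffer size.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int sketch_map_tx_skb(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *addr)
{
	*addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;
	return 0;
}

static void sketch_unmap_tx_skb(struct device *dev, struct sk_buff *skb,
				dma_addr_t addr)
{
	/* Must match the size used at map time. */
	dma_unmap_single(dev, addr, skb->len, DMA_TO_DEVICE);
}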
clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_83xx_disable_intr(adapter, sds_ring); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_enable_mbx_poll(adapter); + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_83xx_disable_intr(adapter, sds_ring); } } qlcnic_fw_destroy_ctx(adapter); qlcnic_detach(adapter); - if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { - err = qlcnic_83xx_setup_mbx_intr(adapter); - qlcnic_83xx_disable_mbx_poll(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "%s: failed to setup mbx interrupt\n", - __func__); - goto out; - } - } - } adapter->ahw->diag_test = 0; adapter->drv_sds_rings = drv_sds_rings; @@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (netif_running(netdev)) __qlcnic_up(adapter, netdev); - if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && - !(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_disable_mbx_poll(adapter); out: netif_device_attach(netdev); } @@ -3754,6 +3734,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, return; } +static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 offset; + + offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", + readl(ahw->pci_base0 + offset), + QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), + QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), + QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); +} + static void qlcnic_83xx_mailbox_worker(struct work_struct *work) { struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, @@ -3798,6 +3791,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) __func__, cmd->cmd_op, cmd->type, ahw->pci_func, ahw->op_mode); clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_dump_mailbox_registers(adapter); + qlcnic_83xx_get_mbx_data(adapter, cmd); qlcnic_dump_mbx(adapter, cmd); qlcnic_83xx_idc_request_reset(adapter, QLCNIC_FORCE_FW_DUMP_KEY); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 4cae6ca..a6a3350 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, pci_channel_state_t); pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); void qlcnic_83xx_io_resume(struct pci_dev *); +void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 89208e5..918e18d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) adapter->ahw->idc.err_code = -EIO; dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } @@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; 
int ret = 0; - u32 owner; u32 val; /* Perform NIC configuration based ready state entry actions */ @@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); } else { - owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); - if (ahw->pci_func == owner) - qlcnic_dump_fw(adapter); + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); } return -EIO; } @@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) +static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { - dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 val, owner; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) { + qlcnic_83xx_stop_hw(adapter); + qlcnic_dump_fw(adapter); + } + } + + netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", + __func__); clear_bit(__QLCNIC_RESETTING, &adapter->state); - adapter->ahw->idc.err_code = -EIO; + ahw->idc.err_code = -EIO; - return 0; + return; } static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) @@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; qlcnic_83xx_periodic_tasks(adapter); - /* Do not reschedule if firmaware is in hanged state and auto - * recovery is disabled - */ - if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) - return; - /* Re-schedule the function */ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, @@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); - if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || - !qlcnic_auto_fw_reset) { - dev_err(&adapter->pdev->dev, - "%s:failed, device in non reset mode\n", __func__); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 0); qlcnic_83xx_unlock_driver(adapter); return; } @@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) if (size & 0xF) size = (size + 16) & ~0xF; - p_cache = kzalloc(size, GFP_KERNEL); + p_cache = vzalloc(size); if (p_cache == NULL) return -ENOMEM; ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, size / sizeof(u32)); if (ret) { - kfree(p_cache); + vfree(p_cache); return ret; } /* 16 byte write to MS memory */ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, size / 16); if (ret) { - kfree(p_cache); + vfree(p_cache); return ret; } - kfree(p_cache); + vfree(p_cache); return ret; } @@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, p_dev->ahw->reset.seq_index = index; } -static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) +void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) { p_dev->ahw->reset.seq_index = 0; @@ -1994,6 +2001,14 @@ static int 
qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) qlcnic_dump_fw(adapter); + + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return err; + } + qlcnic_83xx_init_hw(adapter); if (qlcnic_83xx_copy_bootloader(adapter)) @@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; - adapter->max_sds_rings = ahw->max_rx_ques; - adapter->max_tx_rings = ahw->max_tx_ques; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; } else { return -EIO; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index b36c02f..e3be276 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -667,30 +667,25 @@ qlcnic_set_ringparam(struct net_device *dev, static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, u8 rx_ring, u8 tx_ring) { + if (rx_ring == 0 || tx_ring == 0) + return -EINVAL; + if (rx_ring != 0) { if (rx_ring > adapter->max_sds_rings) { - netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", + netdev_err(adapter->netdev, + "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", rx_ring, adapter->max_sds_rings); return -EINVAL; } } if (tx_ring != 0) { - if (qlcnic_82xx_check(adapter) && - (tx_ring > adapter->max_tx_rings)) { + if (tx_ring > adapter->max_tx_rings) { netdev_err(adapter->netdev, "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", tx_ring, adapter->max_tx_rings); return -EINVAL; } - - if (qlcnic_83xx_check(adapter) && - (tx_ring > QLCNIC_SINGLE_RING)) { - netdev_err(adapter->netdev, - "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", - tx_ring, QLCNIC_SINGLE_RING); - return -EINVAL; - } } return 0; @@ -948,6 +943,7 @@ static int qlcnic_irq_test(struct net_device *netdev) struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; int ret, drv_sds_rings = adapter->drv_sds_rings; + int drv_tx_rings = adapter->drv_tx_rings; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_interrupt_test(netdev); @@ -980,6 +976,7 @@ free_diag_res: clear_diag_irq: adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0149c94..eda6c69 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -687,17 +687,11 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) if (adapter->ahw->linkup && !linkup) { netdev_info(netdev, "NIC Link is down\n"); adapter->ahw->linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - } + netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { netdev_info(netdev, "NIC Link is up\n"); adapter->ahw->linkup = 1; - if 
(netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } + netif_carrier_on(netdev); } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 05c1eef..2c8cac0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) } else { adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; } @@ -1940,7 +1941,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) qlcnic_detach(adapter); adapter->drv_sds_rings = QLCNIC_SINGLE_RING; - adapter->drv_tx_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 0c9c4e8..0351747 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "1.00.00.33" +#define DRV_VERSION "1.00.00.34" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 0780e03..8dee1be 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { }; #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) +#define QLGE_RCV_MAC_ERR_STATS 7 static int ql_update_ring_coalescing(struct ql_adapter *qdev) { @@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev) iter++; } + /* Update receive mac error statistics */ + iter += QLGE_RCV_MAC_ERR_STATS; + /* * Get Per-priority TX pause frame counter statistics. */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index a245dc1..449f506 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, netdev_features_t features) { int err; - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. 
- */ - if (features & NETIF_F_HW_VLAN_CTAG_RX) - features |= NETIF_F_HW_VLAN_CTAG_TX; - else - features &= ~NETIF_F_HW_VLAN_CTAG_TX; /* Update the behavior of vlan accel in the adapter */ err = qlge_update_hw_vlan_features(ndev, features); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2e27837..fd844b5 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - NET_IP_ALIGN + efx->rx_dma_len); + efx->rx_ip_align + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = efx->type->always_rx_scatter; efx->rx_buffer_order = 0; @@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx) WARN_ON(channel->rx_pkt_n_frags); } + efx_ptp_start_datapath(efx); + if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); } @@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx) EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); + efx_ptp_stop_datapath(efx); + /* Stop RX refill */ efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) @@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx, efx->net_dev = net_dev; efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; efx->rx_packet_hash_offset = efx->type->rx_hash_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 366c8e3..4b0bd8a 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -50,6 +50,7 @@ struct efx_mcdi_async_param { static void efx_mcdi_timeout_async(unsigned long context); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { @@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } } +static bool efx_mcdi_poll_once(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + rmb(); + if (!efx->type->mcdi_poll_response(efx)) + return false; + + spin_lock_bh(&mcdi->iface_lock); + efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); + + return true; +} + static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); @@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = jiffies; - rmb(); - if (efx->type->mcdi_poll_response(efx)) + if (efx_mcdi_poll_once(efx)) break; if (time_after(time, finish)) return -ETIMEDOUT; } - spin_lock_bh(&mcdi->iface_lock); - efx_mcdi_read_response_header(efx); - spin_unlock_bh(&mcdi->iface_lock); - /* Return rc=0 like wait_event_timeout() */ return 0; } @@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, rc = efx_mcdi_await_completion(efx); if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + /* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request 
we've just cancelled, by ensuring * that the seqno check therein fails. @@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, ++mcdi->seqno; ++mcdi->credits; spin_unlock_bh(&mcdi->iface_lock); + } - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d mode %d timed out\n", - cmd, (int)inlen, mcdi->mode); - } else { + if (rc == 0) { size_t hdr_len, data_len; /* At the very least we need a memory barrier here to ensure diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b14a717..542a0d2 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -683,6 +683,8 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX + * @rx_ip_align: RX DMA address offset to have IP header aligned in + * in accordance with NET_IP_ALIGN * @rx_dma_len: Current maximum RX DMA length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_truesize: Amortised allocation size of an RX buffer, @@ -816,6 +818,7 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; + unsigned int rx_ip_align; unsigned int rx_dma_len; unsigned int rx_buffer_order; unsigned int rx_buffer_truesize; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 11b6112..91c63ec 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 03acf57..3dd39dc 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -220,6 +220,7 @@ struct efx_ptp_timeset { * @evt_list: List of MC receive events awaiting packets * @evt_free_list: List of free events * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @evt_overflow: Boolean indicating that event list has overflowed * @rx_evts: Instantiated events (on evt_list and evt_free_list) * @workwq: Work queue for processing pending PTP operations * @work: Work task @@ -270,6 +271,7 @@ struct efx_ptp_data { struct list_head evt_list; struct list_head evt_free_list; spinlock_t evt_lock; + bool evt_overflow; struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; struct workqueue_struct *workwq; struct work_struct work; @@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) } } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. + */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); } @@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, break; } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. 
+ */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); return rc; @@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) __skb_queue_tail(q, skb); } else if (time_after(jiffies, match->expiry)) { match->state = PTP_PACKET_STATE_TIMED_OUT; - netif_warn(efx, rx_err, efx->net_dev, - "PTP packet - no timestamp seen\n"); + if (net_ratelimit()) + netif_warn(efx, rx_err, efx->net_dev, + "PTP packet - no timestamp seen\n"); __skb_queue_tail(q, skb); } else { /* Replace unprocessed entry and stop */ @@ -788,9 +801,14 @@ fail: static int efx_ptp_stop(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - int rc = efx_ptp_disable(efx); struct list_head *cursor; struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); if (ptp->rxfilter_installed) { efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, @@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx) list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { list_move(cursor, &efx->ptp_data->evt_free_list); } + ptp->evt_overflow = false; spin_unlock_bh(&efx->ptp_data->evt_lock); return rc; } +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + static void efx_ptp_pps_worker(struct work_struct *work) { struct efx_ptp_data *ptp = @@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) spin_lock_init(&ptp->evt_lock); for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + ptp->evt_overflow = false; ptp->phc_clock_info.owner = THIS_MODULE; snprintf(ptp->phc_clock_info.name, @@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) skb->len >= PTP_MIN_LENGTH && skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); } @@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, { if ((enable_wanted != efx->ptp_data->enabled) || (enable_wanted && (efx->ptp_data->mode != new_mode))) { - int rc; + int rc = 0; if (enable_wanted) { /* Change of mode requires disable */ @@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, * succeed. */ efx->ptp_data->mode = new_mode; - rc = efx_ptp_start(efx); + if (netif_running(efx->net_dev)) + rc = efx_ptp_start(efx); if (rc == 0) { rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS * 2); @@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) list_add_tail(&evt->link, &ptp->evt_list); queue_work(ptp->workwq, &ptp->work); - } else { - netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); + } else if (!ptp->evt_overflow) { + /* Log a warning message and set the event overflow flag. + * The message won't be logged again until the event queue + * becomes empty. 
+ */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + ptp->evt_overflow = true; } spin_unlock_bh(&ptp->evt_lock); } @@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) if (rc != 0) return rc; - ptp_data->current_adjfreq = delta; + ptp_data->current_adjfreq = adjustment_ns; return 0; } @@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), @@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx) efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = &efx_ptp_channel_type; } + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + efx_ptp_stop(efx); +} diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8f09e68..42488df 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / @@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; + rx_buf->dma_addr = dma_addr + efx->rx_ip_align; rx_buf->page = page; - rx_buf->page_offset = page_offset + NET_IP_ALIGN; + rx_buf->page_offset = page_offset + efx->rx_ip_align; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0c9b5d9..8bf29eb 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -82,6 +82,7 @@ static const char version[] = #include <linux/mii.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * } } +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +}; +MODULE_DEVICE_TABLE(of, smc91x_match); +#endif + /* * smc_init(void) * Input parameters: @@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * static int smc_drv_probe(struct platform_device *pdev) { struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); + const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; struct resource *res, *ires; @@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev) */ lp = netdev_priv(ndev); + lp->cfg.flags = 0; if (pd) { memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 
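	/* Editorial note (not part of this patch) on the DT path added just
	 * below: "reg-io-width" is read as a bitmask of supported access
	 * sizes, so for example a value of 6 enables 16-bit and 32-bit
	 * accesses, 1 enables 8-bit only, and a missing or zero property
	 * falls back to 16-bit-only access.
	 */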
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); - } else { + } + +#if IS_BUILTIN(CONFIG_OF) + match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); + if (match) { + struct device_node *np = pdev->dev.of_node; + u32 val; + + /* Combination of IO widths supported, default to 16-bit */ + if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (val & 1) + lp->cfg.flags |= SMC91X_USE_8BIT; + if ((val == 0) || (val & 2)) + lp->cfg.flags |= SMC91X_USE_16BIT; + if (val & 4) + lp->cfg.flags |= SMC91X_USE_32BIT; + } else { + lp->cfg.flags |= SMC91X_USE_16BIT; + } + } +#endif + + if (!pd && !match) { lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; @@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id smc91x_match[] = { - { .compatible = "smsc,lan91c94", }, - { .compatible = "smsc,lan91c111", }, - {}, -}; -MODULE_DEVICE_TABLE(of, smc91x_match); -#endif - static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = smc_drv_resume, diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dd0dd627..4f1d254 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM - /*| NETIF_F_FRAGLIST */ ; ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7536a4c0..5120d9c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1151,6 +1151,12 @@ static int cpsw_ndo_open(struct net_device *ndev) * receive descs */ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); + + if (cpts_register(&priv->pdev->dev, priv->cpts, + priv->data.cpts_clock_mult, + priv->data.cpts_clock_shift)) + dev_err(priv->dev, "error registering cpts device\n"); + } /* Enable Interrupt pacing if configured */ @@ -1197,6 +1203,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_carrier_off(priv->ndev); if (cpsw_common_res_usage_state(priv) <= 1) { + cpts_unregister(priv->cpts); cpsw_intr_disable(priv); cpdma_ctlr_int_ctrl(priv->dma, false); cpdma_ctlr_stop(priv->dma); @@ -1816,6 +1823,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } i++; + if (i == data->slaves) + break; } return 0; @@ -1983,9 +1992,15 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->version = __raw_readl(&priv->regs->id_ver); priv->host_port = HOST_PORT_NUM; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + pm_runtime_get_sync(&pdev->dev); + priv->version = readl(&priv->regs->id_ver); + pm_runtime_put_sync(&pdev->dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->wr_regs)) { @@ -2155,8 +2170,6 @@ static int cpsw_remove(struct platform_device *pdev) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); - cpts_unregister(priv->cpts); - cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); diff --git 
a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 41ba974..cd9b164 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -61,6 +61,7 @@ #include <linux/davinci_emac.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; +static const struct of_device_id davinci_emac_of_match[]; + static struct emac_platform_data * davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; + const struct of_device_id *match; + const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; @@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) - pdata->phy_id = ""; + pdata->phy_id = NULL; + + auxdata = pdev->dev.platform_data; + if (auxdata) { + pdata->interrupt_enable = auxdata->interrupt_enable; + pdata->interrupt_disable = auxdata->interrupt_disable; + } + + match = of_match_device(davinci_emac_of_match, &pdev->dev); + if (match && match->data) { + auxdata = match->data; + pdata->version = auxdata->version; + pdata->hw_ram_addr = auxdata->hw_ram_addr; + } pdev->dev.platform_data = pdata; @@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { }; #if IS_ENABLED(CONFIG_OF) +static const struct emac_platform_data am3517_emac_data = { + .version = EMAC_VERSION_2, + .hw_ram_addr = 0x01e20000, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, + {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1f23641..2166e87 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op) platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &temac_netdev_ops; ndev->ethtool_ops = &temac_ethtool_ops; #if 0 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2ff038..f9293da 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op) SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &axienet_netdev_ops; ndev->ethtool_ops = &axienet_ethtool_ops; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 74234a5..fefb8cd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, drvdata->base_addr + XEL_TSR_OFFSET); - /* Enable the Tx interrupts for the second Buffer if - 
* configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Enable the Rx interrupts for the first buffer */ __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); - /* Enable the Rx interrupts for the second Buffer if - * configured in HW */ - if (drvdata->rx_ping_pong != 0) { - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); - } - /* Enable the Global Interrupt Enable */ __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } @@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), drvdata->base_addr + XEL_TSR_OFFSET); - /* Disable the Tx interrupts for the second Buffer - * if configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Disable the Rx interrupts for the first buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), drvdata->base_addr + XEL_RSR_OFFSET); - - /* Disable the Rx interrupts for the second buffer - * if configured in HW */ - if (drvdata->rx_ping_pong != 0) { - - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - } } /** @@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); + /* Output a word */ *to_u32_ptr++ = align_buffer; } @@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, for (; length > 0; length--) *to_u8_ptr++ = *from_u8_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. 
+ */ + wmb(); *to_u32_ptr = align_buffer; } } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 524f713..f813572 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -327,7 +327,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return -EINVAL; nvdev->start_remove = true; - cancel_delayed_work_sync(&ndevctx->dwork); cancel_work_sync(&ndevctx->work); netif_tx_disable(ndev); rndis_filter_device_remove(hdev); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9093004..2a89da0 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -770,7 +770,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, int ret; int vnet_hdr_len = 0; int vlan_offset = 0; - int copied; + int copied, total; if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; @@ -785,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) return -EFAULT; } - copied = vnet_hdr_len; + total = copied = vnet_hdr_len; + total += skb->len; if (!vlan_tx_tag_present(skb)) len = min_t(int, skb->len, len); @@ -800,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); len = min_t(int, skb->len + VLAN_HLEN, len); + total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); @@ -817,10 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, } ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); - copied += len; done: - return ret ? ret : copied; + return ret ? ret : total; } static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, @@ -875,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv, } ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); - ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? 
*/ + ret = min_t(ssize_t, ret, len); + if (ret > 0) + iocb->ki_pos = ret; out: return ret; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3ae28f4..26fa05a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_KSZ8041RNLI, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KSZ8041RNLI", + .features = PHY_BASIC_FEATURES | + SUPPORTED_Pause | SUPPORTED_Asym_Pause, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_KSZ8051, .phy_id_mask = 0x00fffff0, .name = "Micrel KSZ8051", diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 782e38b..7c8343a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1184,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, { struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; - int vlan_offset = 0; + int vlan_offset = 0, copied; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) @@ -1248,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, total += tun->vnet_hdr_sz; } + copied = total; + total += skb->len; if (!vlan_tx_tag_present(skb)) { len = min_t(int, skb->len, len); } else { @@ -1262,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun, vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); len = min_t(int, skb->len + VLAN_HLEN, len); + total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); - ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); len -= copy; - total += copy; + copied += copy; if (ret || !len) goto done; copy = min_t(int, sizeof(veth), len); - ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); + ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); len -= copy; - total += copy; + copied += copy; if (ret || !len) goto done; } - skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); - total += len; + skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); done: tun->dev->stats.tx_packets++; @@ -1356,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, ret = tun_do_read(tun, tfile, iocb, iv, len, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); + if (ret > 0) + iocb->ki_pos = ret; out: tun_put(tun); return ret; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 916241d..d208f86 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -426,10 +426,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; - if (vi->big_packets) - give_pages(rq, buf); - else if (vi->mergeable_rx_bufs) + if (vi->mergeable_rx_bufs) put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(rq, buf); else dev_kfree_skb(buf); return; @@ -1367,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev) static void virtnet_free_queues(struct virtnet_info *vi) { + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) + 
netif_napi_del(&vi->rq[i].napi); + kfree(vi->rq); kfree(vi->sq); } @@ -1396,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi) struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (vi->big_packets) - give_pages(&vi->rq[i], buf); - else if (vi->mergeable_rx_bufs) + if (vi->mergeable_rx_bufs) put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0358c07..249e01c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, netdev_dbg(dev, "circular route to %pI4\n", &dst->sin.sin_addr.s_addr); dev->stats.collisions++; - goto tx_error; + goto rt_tx_error; } /* Bypass encapsulation if the destination is local */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 1ec5235..130657d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq) int quick_drop; s32 t[3], f[3] = {5180, 5500, 5785}; - if (!(pBase->miscConfiguration & BIT(1))) + if (!(pBase->miscConfiguration & BIT(4))) return; - if (freq < 4000) - quick_drop = eep->modalHeader2G.quick_drop; - else { - t[0] = eep->base_ext1.quick_drop_low; - t[1] = eep->modalHeader5G.quick_drop; - t[2] = eep->base_ext1.quick_drop_high; - quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); + if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) { + if (freq < 4000) { + quick_drop = eep->modalHeader2G.quick_drop; + } else { + t[0] = eep->base_ext1.quick_drop_low; + t[1] = eep->modalHeader5G.quick_drop; + t[2] = eep->base_ext1.quick_drop_high; + quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); + } + REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); } - REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); } static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) @@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz) struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; u8 bias; - if (!(eep->baseEepHeader.featureEnable & 0x40)) + if (!(eep->baseEepHeader.miscConfiguration & 0x40)) return; if (!AR_SREV_9300(ah)) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 54b0415..8918035 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah) else clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; - if (IS_CHAN_HT40(chan)) - clockrate *= 2; - - if (ah->curchan) { + if (chan) { + if (IS_CHAN_HT40(chan)) + clockrate *= 2; if (IS_CHAN_HALF_RATE(chan)) clockrate /= 2; if (IS_CHAN_QUARTER_RATE(chan)) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 09cdbcd..b5a19e0 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, if (!rts_thresh || (len > rts_thresh)) rts = true; } + + if (!aggr) + len = fi->framelen; + ath_buf_set_rate(sc, bf, &info, len, rts); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c 
index de9eb2c..3663394 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2041,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: mutex_lock(&wcn->hal_ind_mutex); msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); - msg_ind->msg_len = len; - msg_ind->msg = kmalloc(len, GFP_KERNEL); - memcpy(msg_ind->msg, buf, len); - list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); - queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); - wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); + if (msg_ind) { + msg_ind->msg_len = len; + msg_ind->msg = kmalloc(len, GFP_KERNEL); + memcpy(msg_ind->msg, buf, len); + list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); + queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); + wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); + } mutex_unlock(&wcn->hal_ind_mutex); + if (msg_ind) + break; + /* FIXME: Do something smarter then just printing an error. */ + wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", + msg_header->msg_type); break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b00a7e9..54e36fc 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -5,6 +5,8 @@ config BRCMSMAC tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" depends on MAC80211 depends on BCMA + select NEW_LEDS if BCMA_DRIVER_GPIO + select LEDS_CLASS if BCMA_DRIVER_GPIO select BRCMUTIL select FW_LOADER select CRC_CCITT diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 905704e..abc9cec 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev, brcmf_err("Disable F2 failed:%d\n", err_ret); } + } else { + err_ret = -ENOENT; } } else if ((regaddr == SDIO_CCCR_ABORT) || (regaddr == SDIO_CCCR_IENx)) { diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 85879db..3c34a72 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,8 +67,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 7 -#define IWL3160_UCODE_API_MAX 7 +#define IWL7260_UCODE_API_MAX 8 +#define IWL3160_UCODE_API_MAX 8 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 7 @@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { @@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .high_temp = true, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_2n_cfg = { @@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_n_cfg = { @@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = 
IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_2ac_cfg = { @@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_2n_cfg = { @@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_n_cfg = { @@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7265_2ac_cfg = { @@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = { .nvm_calib_ver = IWL7265_TX_POWER_VERSION, }; +const struct iwl_cfg iwl7265_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl7265_n_cfg = { + .name = "Intel(R) Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, +}; + MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 18f232e..03fd9aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -207,6 +207,8 @@ struct iwl_eeprom_params { * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: Is this NIC is designated to be in high temperature. + * @host_interrupt_operation_mode: device needs host interrupt operation + * mode set * * We enable the driver to be backward compatible wrt. hardware features. 
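 * (Editorial note: the new @host_interrupt_operation_mode flag, set for the
 * 7260 and 3160 configurations in this series, is consumed in the PCIe RX
 * init path, where IWL_HOST_INT_OPER_MODE is OR-ed into CSR_INT_COALESCING
 * as a workaround for an interrupt coalescing bug.)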
* API differences in uCode shouldn't be handled here but through TLVs @@ -235,6 +237,7 @@ struct iwl_cfg { enum iwl_led_mode led_mode; const bool rx_with_siso_diversity; const bool internal_wimax_coex; + const bool host_interrupt_operation_mode; bool high_temp; }; @@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; +extern const struct iwl_cfg iwl7265_2n_cfg; +extern const struct iwl_cfg iwl7265_n_cfg; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 54a4fdc..da4eca8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -495,14 +495,11 @@ enum secure_load_status_reg { * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit * * default interrupt coalescing timer is 64 x 32 = 2048 usecs - * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs */ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) #define IWL_HOST_INT_TIMEOUT_DEF (0x40) #define IWL_HOST_INT_TIMEOUT_MIN (0x0) -#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) -#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) -#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_OPER_MODE BIT(31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 5d066cb..75b72a9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) BT_VALID_LUT | BT_VALID_WIFI_RX_SW_PRIO_BOOST | BT_VALID_WIFI_TX_SW_PRIO_BOOST | - BT_VALID_MULTI_PRIO_LUT | BT_VALID_CORUN_LUT_20 | BT_VALID_CORUN_LUT_40 | BT_VALID_ANT_ISOLATION | @@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], lockdep_is_held(&mvm->mutex)); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return; + mvmsta = (void *)sta->drv_priv; data->num_bss_ifaces++; diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 6f45966..b9b81e8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, /* new API returns next, not last-used seqno */ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) - err -= 0x10; + err = (u16) (err - 0x10); } iwl_free_resp(&cmd); @@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (gtkdata.unhandled_cipher) return false; if (!gtkdata.num_keys) - return true; + goto out; if (!gtkdata.last_gtk) return false; @@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, (void *)&replay_ctr, GFP_KERNEL); } +out: mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 9864d71..a8fe6b4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -119,6 +119,10 @@ static ssize_t 
iwl_dbgfs_sta_drain_write(struct file *file, if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; + if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) + return -EINVAL; + if (drain < 0 || drain > 1) + return -EINVAL; mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 33cf56f..95ce4b6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * P2P Device discoveribility, while there are other higher priority * events in the system). */ - if (WARN_ONCE(!le32_to_cpu(notif->status), - "Failed to schedule time event\n")) { + if (!le32_to_cpu(notif->status)) { + bool start = le32_to_cpu(notif->action) & + TE_V2_NOTIF_HOST_EVENT_START; + IWL_WARN(mvm, "Time Event %s notification failure\n", + start ? "start" : "end"); if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { iwl_mvm_te_clear_data(mvm, te_data); return; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 941c0c8..8660502 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -353,6 +353,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index fa22639..051268c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); } +static inline void iwl_nic_error(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + set_bit(STATUS_FW_ERROR, &trans_pcie->status); + iwl_op_mode_nic_error(trans->op_mode); +} + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 3f237b4..be3995a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) /* Set interrupt coalescing timer to default (2048 usecs) */ 
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + /* W/A for interrupt coalescing bug in 7260 and 3160 */ + if (trans->cfg->host_interrupt_operation_mode) + iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) @@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_pcie_dump_csr(trans); iwl_dump_fh(trans, NULL); + /* set the ERROR bit before we wake up the caller */ set_bit(STATUS_FW_ERROR, &trans_pcie->status); clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); wake_up(&trans_pcie->wait_command_queue); local_bh_disable(); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); local_bh_enable(); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 5d9337b..cde9c16 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_pcie_apm_init(trans); - /* Set interrupt coalescing calibration timer to default (512 usecs) */ - iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); - spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); iwl_pcie_set_pwr(trans, false); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 059c5ac..0adde91 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) IWL_ERR(trans, "scratch %d = 0x%08x\n", i, le32_to_cpu(txq->scratchbufs[i].scratch)); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); } /* @@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, q->write_ptr, q->read_ptr); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); } } @@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, get_cmd_string(trans_pcie, cmd->id)); ret = -ETIMEDOUT; - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); goto cancel; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9df7bc9..c72438b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr { __le16 rt_chbitmask; } __packed; +struct hwsim_radiotap_ack_hdr { + struct ieee80211_radiotap_header hdr; + u8 rt_flags; + u8 pad; + __le16 rt_channel; + __le16 rt_chbitmask; +} __packed; + /* MAC80211_HWSIM netlinf family */ static struct genl_family hwsim_genl_family = { .id = GENL_ID_GENERATE, @@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, const u8 *addr) { struct sk_buff *skb; - struct hwsim_radiotap_hdr *hdr; + struct hwsim_radiotap_ack_hdr *hdr; u16 flags; struct ieee80211_hdr *hdr11; @@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, if (skb == NULL) return; - hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr)); + hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << 
IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_flags = 0; - hdr->rt_rate = 0; + hdr->pad = 0; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; hdr->rt_chbitmask = cpu_to_le16(flags); @@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, HRTIMER_MODE_REL); } else if (!info->enable_beacon) { unsigned int count = 0; - ieee80211_iterate_active_interfaces( + ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_bcn_en_iter, &count); wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index c8e029d..a09398f 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, if (bss_desc && bss_desc->ssid.ssid_len && (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. ssid, &bss_desc->ssid))) { - kfree(bss_desc); - return 0; + ret = 0; + goto done; } /* Exit Adhoc mode first */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 2329ccc..870f1fa 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -368,11 +368,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, unsigned long rx_ring_ref, unsigned int tx_evtchn, unsigned int rx_evtchn) { + struct task_struct *task; int err = -ENOMEM; - /* Already connected through? */ - if (vif->tx_irq) - return 0; + BUG_ON(vif->tx_irq); + BUG_ON(vif->task); err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); if (err < 0) @@ -411,14 +411,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, } init_waitqueue_head(&vif->wq); - vif->task = kthread_create(xenvif_kthread, - (void *)vif, "%s", vif->dev->name); - if (IS_ERR(vif->task)) { + task = kthread_create(xenvif_kthread, + (void *)vif, "%s", vif->dev->name); + if (IS_ERR(task)) { pr_warn("Could not allocate kthread for %s\n", vif->dev->name); - err = PTR_ERR(vif->task); + err = PTR_ERR(task); goto err_rx_unbind; } + vif->task = task; + rtnl_lock(); if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) dev_set_mtu(vif->dev, ETH_DATA_LEN); @@ -461,8 +463,10 @@ void xenvif_disconnect(struct xenvif *vif) if (netif_carrier_ok(vif->dev)) xenvif_carrier_off(vif); - if (vif->task) + if (vif->task) { kthread_stop(vif->task); + vif->task = NULL; + } if (vif->tx_irq) { if (vif->tx_irq == vif->rx_irq) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 64f0e0d..27bbe58 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -452,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, } /* Set up a GSO prefix descriptor, if necessary */ - if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { + if ((1 << gso_type) & vif->gso_prefix_mask) { req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; @@ -1149,75 +1149,95 @@ static int xenvif_set_skb_gso(struct xenvif *vif, return 0; } -static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len) +static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len, + unsigned int max) { - if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) { - /* If we need to pullup then pullup to the max, so we - * won't need to do it again. 
- */ - int target = min_t(int, skb->len, MAX_TCP_HEADER); - __pskb_pull_tail(skb, target - skb_headlen(skb)); - } + if (skb_headlen(skb) >= len) + return 0; + + /* If we need to pullup then pullup to the max, so we + * won't need to do it again. + */ + if (max > skb->len) + max = skb->len; + + if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) + return -ENOMEM; + + if (skb_headlen(skb) < len) + return -EPROTO; + + return 0; } +/* This value should be large enough to cover a tagged ethernet header plus + * maximally sized IP and TCP or UDP headers. + */ +#define MAX_IP_HDR_LEN 128 + static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, int recalculate_partial_csum) { - struct iphdr *iph = (void *)skb->data; - unsigned int header_size; unsigned int off; - int err = -EPROTO; + bool fragment; + int err; + + fragment = false; - off = sizeof(struct iphdr); + err = maybe_pull_tail(skb, + sizeof(struct iphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) + fragment = true; - header_size = skb->network_header + off + MAX_IPOPTLEN; - maybe_pull_tail(skb, header_size); + off = ip_hdrlen(skb); - off = iph->ihl * 4; + err = -EPROTO; + + if (fragment) + goto out; - switch (iph->protocol) { + switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: + err = maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) goto out; - if (recalculate_partial_csum) { - struct tcphdr *tcph = tcp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct tcphdr); - maybe_pull_tail(skb, header_size); - - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - off, - IPPROTO_TCP, 0); - } + if (recalculate_partial_csum) + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); break; case IPPROTO_UDP: + err = maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) goto out; - if (recalculate_partial_csum) { - struct udphdr *udph = udp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct udphdr); - maybe_pull_tail(skb, header_size); - - udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - off, - IPPROTO_UDP, 0); - } + if (recalculate_partial_csum) + udp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); break; default: - if (net_ratelimit()) - netdev_err(vif->dev, - "Attempting to checksum a non-TCP/UDP packet, " - "dropping a protocol %d packet\n", - iph->protocol); goto out; } @@ -1227,121 +1247,138 @@ out: return err; } +/* This value should be large enough to cover a tagged ethernet header plus + * an IPv6 header, all options, and a maximal TCP or UDP header. 
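 * (For scale, an editorial note: a VLAN-tagged Ethernet header is 18 bytes,
 * the fixed IPv6 header 40 bytes and a maximal TCP header 60 bytes -- about
 * 118 bytes in total -- so the 256-byte MAX_IPV6_HDR_LEN defined below still
 * leaves well over 100 bytes of room for extension headers.)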
+ */ +#define MAX_IPV6_HDR_LEN 256 + +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) + static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, int recalculate_partial_csum) { - int err = -EPROTO; - struct ipv6hdr *ipv6h = (void *)skb->data; + int err; u8 nexthdr; - unsigned int header_size; unsigned int off; + unsigned int len; bool fragment; bool done; + fragment = false; done = false; off = sizeof(struct ipv6hdr); - header_size = skb->network_header + off; - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; - nexthdr = ipv6h->nexthdr; + nexthdr = ipv6_hdr(skb)->nexthdr; - while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) && - !done) { + len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); + while (off <= len && !done) { switch (nexthdr) { case IPPROTO_DSTOPTS: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: { - struct ipv6_opt_hdr *hp = (void *)(skb->data + off); + struct ipv6_opt_hdr *hp; - header_size = skb->network_header + - off + - sizeof(struct ipv6_opt_hdr); - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, + off + + sizeof(struct ipv6_opt_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_optlen(hp); break; } case IPPROTO_AH: { - struct ip_auth_hdr *hp = (void *)(skb->data + off); + struct ip_auth_hdr *hp; + + err = maybe_pull_tail(skb, + off + + sizeof(struct ip_auth_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; - header_size = skb->network_header + - off + - sizeof(struct ip_auth_hdr); - maybe_pull_tail(skb, header_size); + hp = OPT_HDR(struct ip_auth_hdr, skb, off); + nexthdr = hp->nexthdr; + off += ipv6_authlen(hp); + break; + } + case IPPROTO_FRAGMENT: { + struct frag_hdr *hp; + + err = maybe_pull_tail(skb, + off + + sizeof(struct frag_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + hp = OPT_HDR(struct frag_hdr, skb, off); + + if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) + fragment = true; nexthdr = hp->nexthdr; - off += (hp->hdrlen+2)<<2; + off += sizeof(struct frag_hdr); break; } - case IPPROTO_FRAGMENT: - fragment = true; - /* fall through */ default: done = true; break; } } - if (!done) { - if (net_ratelimit()) - netdev_err(vif->dev, "Failed to parse packet header\n"); - goto out; - } + err = -EPROTO; - if (fragment) { - if (net_ratelimit()) - netdev_err(vif->dev, "Packet is a fragment!\n"); + if (!done || fragment) goto out; - } switch (nexthdr) { case IPPROTO_TCP: + err = maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) goto out; - if (recalculate_partial_csum) { - struct tcphdr *tcph = tcp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct tcphdr); - maybe_pull_tail(skb, header_size); - - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, - &ipv6h->daddr, - skb->len - off, - IPPROTO_TCP, 0); - } + if (recalculate_partial_csum) + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); break; case IPPROTO_UDP: + err = maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) goto out; - if (recalculate_partial_csum) { - struct udphdr *udph = udp_hdr(skb); - - header_size = skb->network_header + - 
off + - sizeof(struct udphdr); - maybe_pull_tail(skb, header_size); - - udph->check = ~csum_ipv6_magic(&ipv6h->saddr, - &ipv6h->daddr, - skb->len - off, - IPPROTO_UDP, 0); - } + if (recalculate_partial_csum) + udp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); break; default: - if (net_ratelimit()) - netdev_err(vif->dev, - "Attempting to checksum a non-TCP/UDP packet, " - "dropping a protocol %d packet\n", - nexthdr); goto out; } @@ -1411,14 +1448,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xenvif_tx_build_gops(struct xenvif *vif) +static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) { struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS)) { + < MAX_PENDING_REQS) && + (skb_queue_len(&vif->tx_queue) < budget)) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct page *page; @@ -1440,7 +1478,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) continue; } - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); + work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); if (!work_to_do) break; @@ -1580,14 +1618,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) } -static int xenvif_tx_submit(struct xenvif *vif, int budget) +static int xenvif_tx_submit(struct xenvif *vif) { struct gnttab_copy *gop = vif->tx_copy_ops; struct sk_buff *skb; int work_done = 0; - while (work_done < budget && - (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { + while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; unsigned data_len; @@ -1662,14 +1699,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget) if (unlikely(!tx_work_todo(vif))) return 0; - nr_gops = xenvif_tx_build_gops(vif); + nr_gops = xenvif_tx_build_gops(vif, budget); if (nr_gops == 0) return 0; gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - work_done = xenvif_tx_submit(vif, nr_gops); + work_done = xenvif_tx_submit(vif); return work_done; } diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index c269e43..2aa7b77c 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, *value = 0; break; + case PCI_INTERRUPT_LINE: + /* LINE PIN MIN_GNT MAX_LAT */ + *value = 0; + break; + default: *value = 0xffffffff; return PCIBIOS_BAD_REGISTER_NUMBER; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 9042fdb..25f0bc6 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -19,6 +19,7 @@ #include <linux/cpu.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> +#include <linux/kexec.h> #include "pci.h" struct pci_dynid { @@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, int error, node; struct drv_dev_and_id ddi = { drv, dev, id }; - /* Execute driver initialization on node where the device's - bus is attached to. This way the driver likely allocates - its local memory on the right node without any need to - change it. */ + /* + * Execute driver initialization on node where the device is + * attached. This way the driver likely allocates its local memory + * on the right node. 
+ */ node = dev_to_node(&dev->dev); - if (node >= 0) { + + /* + * On NUMA systems, we are likely to call a PF probe function using + * work_on_cpu(). If that probe calls pci_enable_sriov() (which + * adds the VF devices via pci_bus_add_device()), we may re-enter + * this function to call the VF probe function. Calling + * work_on_cpu() again will cause a lockdep warning. Since VFs are + * always on the same node as the PF, we can work around this by + * avoiding work_on_cpu() when we're already on the correct node. + * + * Preemption is enabled, so it's theoretically unsafe to use + * numa_node_id(), but even if we run the probe function on the + * wrong node, it should be functionally correct. + */ + if (node >= 0 && node != numa_node_id()) { int cpu; get_online_cpus(); @@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, put_online_cpus(); } else error = local_pci_probe(&ddi); + return error; } @@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev) pci_msi_shutdown(pci_dev); pci_msix_shutdown(pci_dev); +#ifdef CONFIG_KEXEC /* - * Turn off Bus Master bit on the device to tell it to not - * continue to do DMA. Don't touch devices in D3cold or unknown states. + * If this is a kexec reboot, turn off Bus Master bit on the + * device to tell it to not continue to do DMA. Don't touch + * devices in D3cold or unknown states. + * If it is not a kexec reboot, firmware will hit the PCI + * devices with big hammer and stop their DMA any way. */ - if (pci_dev->current_state <= PCI_D3hot) + if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) pci_clear_master(pci_dev); +#endif } #ifdef CONFIG_PM diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 33120d1..07369f3 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, return 0; } +bool pci_device_is_present(struct pci_dev *pdev) +{ + u32 v; + + return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +EXPORT_SYMBOL_GPL(pci_device_is_present); + #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; static DEFINE_SPINLOCK(resource_alignment_lock); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 1576851..cc9337a 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev) if (dev->is_added) { pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - device_del(&dev->dev); + device_release_driver(&dev->dev); dev->is_added = 0; } @@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev) static void pci_destroy_dev(struct pci_dev *dev) { + device_del(&dev->dev); + down_write(&pci_bus_sem); list_del(&dev->bus_list); up_write(&pci_bus_sem); diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a344f3d..330ef2d 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS + depends on USB_PHY select GENERIC_PHY - select USB_PHY select OMAP_CONTROL_USB help Enable this to support the transceiver that is part of SOC. 
This @@ -36,8 +36,8 @@ config OMAP_USB2 config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS + depends on USB_PHY select GENERIC_PHY - select USB_PHY help Enable this to support the USB OTG transceiver on TWL4030 family chips (including the TWL5030 and TPS659x0 devices). diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 03cf8fb..58e0e97 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, int id; struct phy *phy; - if (!dev) { - dev_WARN(dev, "no device provided for PHY\n"); - ret = -EINVAL; - goto err0; - } + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); phy = kzalloc(sizeof(*phy), GFP_KERNEL); - if (!phy) { - ret = -ENOMEM; - goto err0; - } + if (!phy) + return ERR_PTR(-ENOMEM); id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); if (id < 0) { dev_err(dev, "unable to get id\n"); ret = id; - goto err0; + goto free_phy; } device_initialize(&phy->dev); @@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); if (ret) - goto err1; + goto put_dev; ret = device_add(&phy->dev); if (ret) - goto err1; + goto put_dev; if (pm_runtime_enabled(dev)) { pm_runtime_enable(&phy->dev); @@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, return phy; -err1: - ida_remove(&phy_ida, phy->id); +put_dev: put_device(&phy->dev); + ida_remove(&phy_ida, phy->id); +free_phy: kfree(phy); - -err0: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(phy_create); diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 11bd0d9..e214295 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -254,7 +254,7 @@ struct sh_pfc_soc_info { #define PINMUX_GPIO(_pin) \ [GPIO_##_pin] = { \ .pin = (u16)-1, \ - .name = __stringify(name), \ + .name = __stringify(GPIO_##_pin), \ .enum_id = _pin##_DATA, \ } diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 5917fe3..b9f1d24 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c @@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, default: return -EINVAL; } + ret <<= ffs(mask) - 1; val = ret & mask; - val <<= ffs(mask) - 1; return as3722_update_bits(as3722, reg, mask, val); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 3fe1313..d85f313 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev) return ""; } +static bool have_full_constraints(void) +{ + return has_full_constraints || of_have_populated_dt(); +} + /** * of_get_regulator - get a regulator device node based on supply name * @dev: Device pointer for the consumer (of regulator) device @@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, * Assume that a regulator is physically present and enabled * even if it isn't hooked up and just provide a dummy. 
*/ - if (has_full_constraints && allow_dummy) { + if (have_full_constraints() && allow_dummy) { pr_warn("%s supply %s not found, using dummy regulator\n", devname, id); @@ -3627,7 +3632,7 @@ int regulator_suspend_finish(void) if (error) ret = error; } else { - if (!has_full_constraints) + if (!have_full_constraints()) goto unlock; if (!ops->disable) goto unlock; @@ -3825,7 +3830,7 @@ static int __init regulator_init_complete(void) if (!enabled) goto unlock; - if (has_full_constraints) { + if (have_full_constraints()) { /* We log since this may kill the system if it * goes wrong. */ rdev_info(rdev, "disabling\n"); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 032df37..8b5e4c7 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -38,7 +38,7 @@ #define PFUZE100_DEVICEID 0x0 #define PFUZE100_REVID 0x3 -#define PFUZE100_FABID 0x3 +#define PFUZE100_FABID 0x4 #define PFUZE100_SW1ABVOL 0x20 #define PFUZE100_SW1CVOL 0x2e diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 333677d..9e61922 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -438,7 +438,7 @@ common_reg: platform_set_drvdata(pdev, s2mps11); config.dev = &pdev->dev; - config.regmap = iodev->regmap; + config.regmap = iodev->regmap_pmic; config.driver_data = s2mps11; for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { if (!reg_np) { diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index cbf91e2..aeb40aa 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) config.dev = s5m8767->dev; config.init_data = pdata->regulators[i].initdata; config.driver_data = s5m8767; - config.regmap = iodev->regmap; + config.regmap = iodev->regmap_pmic; config.of_node = pdata->regulators[i].reg_node; rdev[i] = devm_regulator_register(&pdev->dev, ®ulators[id], diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index c0da95e..3281c90 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) at91_alarm_year = tm.tm_year; + tm.tm_mon = alrm->time.tm_mon; + tm.tm_mday = alrm->time.tm_mday; tm.tm_hour = alrm->time.tm_hour; tm.tm_min = alrm->time.tm_min; tm.tm_sec = alrm->time.tm_sec; diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b7fd02b..ae8119d 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -28,10 +28,20 @@ #include <linux/mfd/samsung/irq.h> #include <linux/mfd/samsung/rtc.h> +/* + * Maximum number of retries for checking changes in UDR field + * of SEC_RTC_UDR_CON register (to limit possible endless loop). + * + * After writing to RTC registers (setting time or alarm) read the UDR field + * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have + * been transferred. + */ +#define UDR_READ_RETRY_CNT 5 + struct s5m_rtc_info { struct device *dev; struct sec_pmic_dev *s5m87xx; - struct regmap *rtc; + struct regmap *regmap; struct rtc_device *rtc_dev; int irq; int device_type; @@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data) } } +/* + * Read RTC_UDR_CON register and wait till UDR field is cleared. + * This indicates that time/alarm update ended. 
+ */ +static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info) +{ + int ret, retry = UDR_READ_RETRY_CNT; + unsigned int data; + + do { + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); + } while (--retry && (data & RTC_UDR_MASK) && !ret); + + if (!retry) + dev_err(info->dev, "waiting for UDR update, reached max number of retries\n"); + + return ret; +} + static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) { int ret; unsigned int data; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); if (ret < 0) { dev_err(info->dev, "failed to read update reg(%d)\n", ret); return ret; @@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) data |= RTC_TIME_EN_MASK; data |= RTC_UDR_MASK; - ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); + ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); if (ret < 0) { dev_err(info->dev, "failed to write update reg(%d)\n", ret); return ret; } - do { - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); - } while ((data & RTC_UDR_MASK) && !ret); + ret = s5m8767_wait_for_udr_update(info); return ret; } @@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) int ret; unsigned int data; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); if (ret < 0) { dev_err(info->dev, "%s: fail to read update reg(%d)\n", __func__, ret); @@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) data &= ~RTC_TIME_EN_MASK; data |= RTC_UDR_MASK; - ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); + ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); if (ret < 0) { dev_err(info->dev, "%s: fail to write update reg(%d)\n", __func__, ret); return ret; } - do { - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); - } while ((data & RTC_UDR_MASK) && !ret); + ret = s5m8767_wait_for_udr_update(info); return ret; } @@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm) u8 data[8]; int ret; - ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8); if (ret < 0) return ret; @@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm) 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); - ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8); if (ret < 0) return ret; @@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) unsigned int val; int ret, i; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; switch (info->device_type) { case S5M8763X: s5m8763_data_to_tm(data, &alrm->time); - ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val); + ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val); if (ret < 0) return ret; alrm->enabled = !!val; - ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); + ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); if (ret < 0) return ret; @@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) } alrm->pending = 0; - ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); + ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); if (ret < 0) return ret; break; @@ -301,7 
+326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) int ret, i; struct rtc_time tm; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) switch (info->device_type) { case S5M8763X: - ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0); + ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0); break; case S5M8767X: for (i = 0; i < 7; i++) data[i] &= ~ALARM_ENABLE_MASK; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) u8 alarm0_conf; struct rtc_time tm; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) switch (info->device_type) { case S5M8763X: alarm0_conf = 0x77; - ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf); + ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf); break; case S5M8767X: @@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) if (data[RTC_YEAR1] & 0x7f) data[RTC_YEAR1] |= ALARM_ENABLE_MASK; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; ret = s5m8767_rtc_set_alarm_reg(info); @@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret < 0) return ret; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = { static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) { int ret; - ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, + ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, WTSR_ENABLE_MASK, enable ? WTSR_ENABLE_MASK : 0); if (ret < 0) @@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable) { int ret; - ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, + ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, SMPL_ENABLE_MASK, enable ? 
SMPL_ENABLE_MASK : 0); if (ret < 0) @@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) int ret; struct rtc_time tm; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read); if (ret < 0) { dev_err(info->dev, "%s: fail to read control reg(%d)\n", __func__, ret); @@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); info->rtc_24hr_mode = 1; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2); if (ret < 0) { dev_err(info->dev, "%s: fail to write controlm reg(%d)\n", __func__, ret); @@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) ret = s5m_rtc_set_time(info->dev, &tm); } - ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON, + ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON, RTC_TCON_MASK, tp_read | RTC_TCON_MASK); if (ret < 0) dev_err(info->dev, "%s: fail to update TCON reg(%d)\n", @@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; info->s5m87xx = s5m87xx; - info->rtc = s5m87xx->rtc; + info->regmap = s5m87xx->regmap_rtc; info->device_type = s5m87xx->device_type; info->wtsr_smpl = s5m87xx->wtsr_smpl; switch (pdata->device_type) { case S5M8763X: - info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0; + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, + S5M8763_IRQ_ALARM0); break; case S5M8767X: - info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1; + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, + S5M8767_IRQ_RTCA1); break; default: @@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) if (info->wtsr_smpl) { for (i = 0; i < 3; i++) { s5m_rtc_enable_wtsr(info, false); - regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val); + regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val); pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); if (val & WTSR_ENABLE_MASK) pr_emerg("%s: fail to disable WTSR\n", @@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) s5m_rtc_enable_smpl(info, false); } +static int s5m_rtc_resume(struct device *dev) +{ + struct s5m_rtc_info *info = dev_get_drvdata(dev); + int ret = 0; + + if (device_may_wakeup(dev)) + ret = disable_irq_wake(info->irq); + + return ret; +} + +static int s5m_rtc_suspend(struct device *dev) +{ + struct s5m_rtc_info *info = dev_get_drvdata(dev); + int ret = 0; + + if (device_may_wakeup(dev)) + ret = enable_irq_wake(info->irq); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); + static const struct platform_device_id s5m_rtc_id[] = { { "s5m-rtc", 0 }, }; @@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = { .driver = { .name = "s5m-rtc", .owner = THIS_MODULE, + .pm = &s5m_rtc_pm_ops, }, .probe = s5m_rtc_probe, .shutdown = s5m_rtc_shutdown, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 5964800..38a1257 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, schedule_delayed_work(&tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - jiffies); } /* ha->hardware_lock supposed to be held on entry */ @@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) struct 
scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess; - unsigned long flags; + unsigned long flags, elapsed; spin_lock_irqsave(&ha->hardware_lock, flags); while (!list_empty(&tgt->del_sess_list)) { sess = list_entry(tgt->del_sess_list.next, typeof(*sess), del_list_entry); - if (time_after_eq(jiffies, sess->expires)) { + elapsed = jiffies; + if (time_after_eq(elapsed, sess->expires)) { qlt_undelete_sess(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, @@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) ha->tgt.tgt_ops->put_sess(sess); } else { schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - elapsed); break; } } @@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, if (rc != 0) { ha->tgt.tgt_ops = NULL; ha->tgt.target_lport_ptr = NULL; + scsi_host_put(host); } mutex_unlock(&qla_tgt_mutex); return rc; diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 8f02bf6..4964d2a 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev, release_firmware(fw); } - return ret; + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(comedi_load_firmware); diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c index 432e3f9..c55f234 100644 --- a/drivers/staging/comedi/drivers/8255_pci.c +++ b/drivers/staging/comedi/drivers/8255_pci.c @@ -63,7 +63,8 @@ enum pci_8255_boardid { BOARD_ADLINK_PCI7296, BOARD_CB_PCIDIO24, BOARD_CB_PCIDIO24H, - BOARD_CB_PCIDIO48H, + BOARD_CB_PCIDIO48H_OLD, + BOARD_CB_PCIDIO48H_NEW, BOARD_CB_PCIDIO96H, BOARD_NI_PCIDIO96, BOARD_NI_PCIDIO96B, @@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = { .dio_badr = 2, .n_8255 = 1, }, - [BOARD_CB_PCIDIO48H] = { + [BOARD_CB_PCIDIO48H_OLD] = { .name = "cb_pci-dio48h", .dio_badr = 1, .n_8255 = 2, }, + [BOARD_CB_PCIDIO48H_NEW] = { + .name = "cb_pci-dio48h", + .dio_badr = 2, + .n_8255 = 2, + }, [BOARD_CB_PCIDIO96H] = { .name = "cb_pci-dio96h", .dio_badr = 2, @@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, - { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000), + .driver_data = BOARD_CB_PCIDIO48H_OLD }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b), + .driver_data = BOARD_CB_PCIDIO48H_NEW }, { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index 99421f9..0485d7f 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c @@ -451,7 +451,12 @@ done: .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = idx, \ - .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ } static const struct iio_chan_spec hmc5843_channels[] = { diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 6bd015a..96e4eee 100644 --- 
a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) imx_drm_device_put(); - drm_mode_config_cleanup(imxdrm->drm); + drm_vblank_cleanup(imxdrm->drm); drm_kms_helper_poll_fini(imxdrm->drm); + drm_mode_config_cleanup(imxdrm->drm); return 0; } @@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm, if (!file->is_master) return; - for (i = 0; i < 4; i++) - imx_drm_disable_vblank(drm , i); + for (i = 0; i < MAX_CRTC; i++) + imx_drm_disable_vblank(drm, i); } static const struct file_operations imx_drm_driver_fops = { @@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) struct imx_drm_device *imxdrm = __imx_drm_device(); int ret; - drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, - imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); if (ret) return ret; @@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) drm_crtc_helper_add(imx_drm_crtc->crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); + drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, + imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); + drm_mode_group_reinit(imxdrm->drm); return 0; @@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) ret = drm_mode_group_init_legacy_group(imxdrm->drm, &imxdrm->drm->primary->mode_group); if (ret) - goto err_init; + goto err_kms; ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); if (ret) - goto err_init; + goto err_kms; /* * with vblank_disable_allowed = true, vblank interrupt will be disabled @@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) */ imxdrm->drm->vblank_disable_allowed = true; - if (!imx_drm_device_get()) + if (!imx_drm_device_get()) { ret = -EINVAL; + goto err_vblank; + } - ret = 0; + mutex_unlock(&imxdrm->mutex); + return 0; -err_init: +err_vblank: + drm_vblank_cleanup(drm); +err_kms: + drm_kms_helper_poll_fini(drm); + drm_mode_config_cleanup(drm); mutex_unlock(&imxdrm->mutex); return ret; @@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, mutex_lock(&imxdrm->mutex); + /* + * The vblank arrays are dimensioned by MAX_CRTC - we can't + * pass IDs greater than this to those functions. 
+ */ + if (imxdrm->pipes >= MAX_CRTC) { + ret = -EINVAL; + goto err_busy; + } + if (imxdrm->drm->open_count) { ret = -EBUSY; goto err_busy; @@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, return 0; err_register: + list_del(&imx_drm_crtc->list); kfree(imx_drm_crtc); err_alloc: err_busy: diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 680f4c8..2c44fef 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -114,7 +114,6 @@ struct imx_tve { struct drm_encoder encoder; struct imx_drm_encoder *imx_drm_encoder; struct device *dev; - spinlock_t enable_lock; /* serializes tve_enable/disable */ spinlock_t lock; /* register lock */ bool enabled; int mode; @@ -146,10 +145,8 @@ __releases(&tve->lock) static void tve_enable(struct imx_tve *tve) { - unsigned long flags; int ret; - spin_lock_irqsave(&tve->enable_lock, flags); if (!tve->enabled) { tve->enabled = true; clk_prepare_enable(tve->clk); @@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve) TVE_CD_SM_IEN | TVE_CD_LM_IEN | TVE_CD_MON_END_IEN); - - spin_unlock_irqrestore(&tve->enable_lock, flags); } static void tve_disable(struct imx_tve *tve) { - unsigned long flags; int ret; - spin_lock_irqsave(&tve->enable_lock, flags); if (tve->enabled) { tve->enabled = false; ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_IPU_CLK_EN | TVE_EN, 0); clk_disable_unprepare(tve->clk); } - spin_unlock_irqrestore(&tve->enable_lock, flags); } static int tve_setup_tvout(struct imx_tve *tve) @@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev) tve->dev = &pdev->dev; spin_lock_init(&tve->lock); - spin_lock_init(&tve->enable_lock); ddc_node = of_parse_phandle(np, "ddc", 0); if (ddc_node) { diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c index 7a22ce6..97ca692 100644 --- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c +++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c @@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = { }, }; +static DEFINE_MUTEX(ipu_client_id_mutex); static int ipu_client_id; -static int ipu_add_subdevice_pdata(struct device *dev, - const struct ipu_platform_reg *reg) -{ - struct platform_device *pdev; - - pdev = platform_device_register_data(dev, reg->name, ipu_client_id++, - ®->pdata, sizeof(struct ipu_platform_reg)); - - return PTR_ERR_OR_ZERO(pdev); -} - static int ipu_add_client_devices(struct ipu_soc *ipu) { - int ret; - int i; + struct device *dev = ipu->dev; + unsigned i; + int id, ret; + + mutex_lock(&ipu_client_id_mutex); + id = ipu_client_id; + ipu_client_id += ARRAY_SIZE(client_reg); + mutex_unlock(&ipu_client_id_mutex); for (i = 0; i < ARRAY_SIZE(client_reg); i++) { const struct ipu_platform_reg *reg = &client_reg[i]; - ret = ipu_add_subdevice_pdata(ipu->dev, reg); - if (ret) + struct platform_device *pdev; + + pdev = platform_device_register_data(dev, reg->name, + id++, ®->pdata, sizeof(reg->pdata)); + + if (IS_ERR(pdev)) goto err_register; } return 0; err_register: - platform_device_unregister_children(to_platform_device(ipu->dev)); + platform_device_unregister_children(to_platform_device(dev)); return ret; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d70e911..0086719 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np) */ send_sig(SIGINT, np->np_thread, 1); kthread_stop(np->np_thread); 
+ np->np_thread = NULL; } np->np_transport->iscsit_free_np(np); @@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (((hdr->flags & ISCSI_FLAG_CMD_READ) || (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { /* - * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) - * that adds support for RESERVE/RELEASE. There is a bug - * add with this new functionality that sets R/W bits when - * neither CDB carries any READ or WRITE datapayloads. + * From RFC-3720 Section 10.3.1: + * + * "Either or both of R and W MAY be 1 when either the + * Expected Data Transfer Length and/or Bidirectional Read + * Expected Data Transfer Length are 0" + * + * For this case, go ahead and clear the unnecssary bits + * to avoid any confusion with ->data_direction. */ - if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { - hdr->flags &= ~ISCSI_FLAG_CMD_READ; - hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; - goto done; - } + hdr->flags &= ~ISCSI_FLAG_CMD_READ; + hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; - pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" + pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" " set when Expected Data Transfer Length is 0 for" - " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); - return iscsit_add_reject_cmd(cmd, - ISCSI_REASON_BOOKMARK_INVALID, buf); + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); } -done: if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index e3318ed..1c0088f 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \ \ if (!capable(CAP_SYS_ADMIN)) \ return -EPERM; \ - \ + if (count >= sizeof(auth->name)) \ + return -EINVAL; \ snprintf(auth->name, sizeof(auth->name), "%s", page); \ if (!strncmp("NULL", auth->name, 4)) \ auth->naf_flags &= ~flags; \ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 4eb93b2..e29279e 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1403,11 +1403,6 @@ old_sess_out: out: stop = kthread_should_stop(); - if (!stop && signal_pending(current)) { - spin_lock_bh(&np->np_thread_lock); - stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); - spin_unlock_bh(&np->np_thread_lock); - } /* Wait for another socket.. 
*/ if (!stop) return 1; @@ -1415,7 +1410,6 @@ exit: iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; - np->np_thread = NULL; spin_unlock_bh(&np->np_thread_lock); return 0; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 207b340..d06de84 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) dev->dev_attrib.block_size = block_size; pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); + + if (dev->dev_attrib.max_bytes_per_io) + dev->dev_attrib.hw_max_sectors = + dev->dev_attrib.max_bytes_per_io / block_size; + return 0; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0e34cda..78241a5 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); - pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" - " MaxSectors: %u\n", - hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", + hba->hba_id, fd_host->fd_host_id); return 0; } @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev) } dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; + dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5b..d7772c1 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -7,7 +7,10 @@ #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 2048 +/* + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call + */ +#define FD_MAX_BYTES 8388608 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f697f8b..2a573de 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); acl->dynamic_node_acl = 1; tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) spin_lock_init(&lun->lun_sep_lock); init_completion(&lun->lun_ref_comp); - ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); - if (ret < 0) - return ret; - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); - if (ret < 0) { - 
percpu_ref_cancel_init(&lun->lun_ref); + if (ret < 0) return ret; - } return 0; } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 268b627..34aacaa 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -93,6 +93,7 @@ struct n_tty_data { size_t canon_head; size_t echo_head; size_t echo_commit; + size_t echo_mark; DECLARE_BITMAP(char_map, 256); /* private to n_tty_receive_overrun (single-threaded) */ @@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; + ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; @@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty) size_t head; head = ldata->echo_head; + ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; /* Process committed echoes if the accumulated # of bytes @@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty) size_t echoed; if ((!L_ECHO(tty) && !L_ECHONL(tty)) || - ldata->echo_commit == ldata->echo_tail) + ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); + ldata->echo_commit = ldata->echo_mark; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); @@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty) tty->ops->flush_chars(tty); } +/* NB: echo_mark and echo_head should be equivalent here */ static void flush_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4658e3e..06525f1 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) if (offset == UART_LCR) { int tries = 1000; while (tries--) { - if (value == p->serial_in(p, UART_LCR)) + unsigned int lcr = p->serial_in(p, UART_LCR); + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) return; dw8250_force_idle(p); writeb(value, p->membase + (UART_LCR << p->regshift)); @@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) if (offset == UART_LCR) { int tries = 1000; while (tries--) { - if (value == p->serial_in(p, UART_LCR)) + unsigned int lcr = p->serial_in(p, UART_LCR); + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) return; dw8250_force_idle(p); writel(value, p->membase + (UART_LCR << p->regshift)); @@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { { "INT33C4", 0 }, { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, { "80860F0A", 0 }, { }, }; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index e46e9f3..f619ad5 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) continue; } +#ifdef SUPPORT_SYSRQ /* * uart_handle_sysrq_char() doesn't work if * spinlocked, for some reason @@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) } spin_lock(&port->lock); } +#endif port->icount.rx++; diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 22fad8a..d8a55e8 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) return atomic_long_add_return(delta, (atomic_long_t 
*)&sem->count); } +/* + * ldsem_cmpxchg() updates @*old with the last-known sem->count value. + * Returns 1 if count was successfully changed; @*old will have @new value. + * Returns 0 if count was not changed; @*old will have most recent sem->count + */ static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) { - long tmp = *old; - *old = atomic_long_cmpxchg(&sem->count, *old, new); - return *old == tmp; + long tmp = atomic_long_cmpxchg(&sem->count, *old, new); + if (tmp == *old) { + *old = new; + return 1; + } else { + *old = tmp; + return 0; + } } /* diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 5d8981c..6e73f8c 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) : CI_ROLE_GADGET; } + /* only update vbus status for peripheral */ + if (ci->role == CI_ROLE_GADGET) + ci_handle_vbus_change(ci); + ret = ci_role_start(ci, ci->role); if (ret) { dev_err(dev, "can't start %s role\n", ci_role(ci)->name); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 59e6020..526cd77 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci) return ret; disable_reg: - regulator_disable(ci->platdata->reg_vbus); + if (ci->platdata->reg_vbus) + regulator_disable(ci->platdata->reg_vbus); put_hcd: usb_put_hcd(hcd); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index b34c819..69d20fb 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci) pm_runtime_no_callbacks(&ci->gadget.dev); pm_runtime_enable(&ci->gadget.dev); - /* Update ci->vbus_active */ - ci_handle_vbus_change(ci); - return retval; destroy_eps: diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 4d38759..0b23a86 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on) { /* need autopm_get/put here to ensure the usbcore sees the new value */ int rv = usb_autopm_get_interface(intf); - if (rv < 0) - goto err; intf->needs_remote_wakeup = on; - usb_autopm_put_interface(intf); -err: - return rv; + if (!rv) + usb_autopm_put_interface(intf); + return 0; } static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 74f9cf0..a49217a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); - usb_phy_set_suspend(dwc->usb2_phy, 0); - usb_phy_set_suspend(dwc->usb3_phy, 0); - spin_lock_init(&dwc->lock); platform_set_drvdata(pdev, dwc); @@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev) goto err0; } + usb_phy_set_suspend(dwc->usb2_phy, 0); + usb_phy_set_suspend(dwc->usb3_phy, 0); + ret = dwc3_event_buffers_setup(dwc); if (ret) { dev_err(dwc->dev, "failed to setup event buffers\n"); @@ -569,6 +569,8 @@ err2: dwc3_event_buffers_cleanup(dwc); err1: + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); dwc3_core_exit(dwc); err0: diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 418444e..8c356af 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -136,23 +136,27 @@ static int 
usb_hcd_at91_probe(const struct hc_driver *driver, struct ohci_hcd *ohci; int retval; struct usb_hcd *hcd = NULL; - - if (pdev->num_resources != 2) { - pr_debug("hcd probe: invalid num_resources"); - return -ENODEV; + struct device *dev = &pdev->dev; + struct resource *res; + int irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(dev, "hcd probe: missing memory resource\n"); + return -ENXIO; } - if ((pdev->resource[0].flags != IORESOURCE_MEM) - || (pdev->resource[1].flags != IORESOURCE_IRQ)) { - pr_debug("hcd probe: invalid resource type\n"); - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_dbg(dev, "hcd probe: missing irq resource\n"); + return irq; } hcd = usb_create_hcd(driver, &pdev->dev, "at91"); if (!hcd) return -ENOMEM; - hcd->rsrc_start = pdev->resource[0].start; - hcd->rsrc_len = resource_size(&pdev->resource[0]); + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_debug("request_mem_region failed\n"); @@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, ohci->num_ports = board->ports; at91_start_hc(pdev); - retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval == 0) return retval; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index b8dffd5..73f5208 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) * any other sleep) on Haswell machines with LPT and LPT-LP * with the new Intel BIOS */ - xhci->quirks |= XHCI_SPURIOUS_WAKEUP; + /* Limit the quirk to only known vendors, as this triggers + * yet another BIOS bug on some other machines + * https://bugzilla.kernel.org/show_bug.cgi?id=66171 + */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) + xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 08e2f39..2b41c63 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -19,8 +19,9 @@ config AB8500_USB in host mode, low speed. 
config FSL_USB2_OTG - bool "Freescale USB OTG Transceiver Driver" + tristate "Freescale USB OTG Transceiver Driver" depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME + depends on USB select USB_OTG select USB_PHY help @@ -29,6 +30,7 @@ config FSL_USB2_OTG config ISP1301_OMAP tristate "Philips ISP1301 with OMAP OTG" depends on I2C && ARCH_OMAP_OTG + depends on USB select USB_PHY help If you say yes here you get support for the Philips ISP1301 diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 82232ac..bbe4f8e 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!tegra_phy->regs) { + if (!tegra_phy->pad_regs) { dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); return -ENOMEM; } diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 30e8a61..bad57ce 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module, static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) { - u8 data, ret = 0; + u8 data; + int ret; ret = twl_i2c_read_u8(module, &data, address); if (ret >= 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 496b7e39..cc7a241 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AC2726 0xfff1 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index fca4c75..eae2c87 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c @@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x19d2, 0xfffd) }, { USB_DEVICE(0x19d2, 0xfffc) }, { USB_DEVICE(0x19d2, 0xfffb) }, - /* AC2726, AC8710_V3 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, + /* AC8710_V3 */ { USB_DEVICE(0x19d2, 0xfff6) }, { USB_DEVICE(0x19d2, 0xfff7) }, { USB_DEVICE(0x19d2, 0xfff8) }, diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 55ea73f..4c02e2b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages) pfn = page_to_pfn(page); - set_phys_to_machine(pfn, frame_list[i]); - #ifdef CONFIG_XEN_HAVE_PVMMU - /* Link back into the page tables if not highmem. 
*/ - if (xen_pv_domain() && !PageHighMem(page)) { - int ret; - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - mfn_pte(frame_list[i], PAGE_KERNEL), - 0); - BUG_ON(ret); + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + set_phys_to_machine(pfn, frame_list[i]); + + /* Link back into the page tables if not highmem. */ + if (!PageHighMem(page)) { + int ret; + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + mfn_pte(frame_list[i], PAGE_KERNEL), + 0); + BUG_ON(ret); + } } #endif @@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) enum bp_state state = BP_DONE; unsigned long pfn, i; struct page *page; - struct page *scratch_page; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, @@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) scrub_page(page); +#ifdef CONFIG_XEN_HAVE_PVMMU /* * Ballooned out frames are effectively replaced with * a scratch frame. Ensure direct mappings and the * p2m are consistent. */ - scratch_page = get_balloon_scratch_page(); -#ifdef CONFIG_XEN_HAVE_PVMMU - if (xen_pv_domain() && !PageHighMem(page)) { - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - pfn_pte(page_to_pfn(scratch_page), - PAGE_KERNEL_RO), 0); - BUG_ON(ret); - } -#endif if (!xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long p; + struct page *scratch_page = get_balloon_scratch_page(); + + if (!PageHighMem(page)) { + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + pfn_pte(page_to_pfn(scratch_page), + PAGE_KERNEL_RO), 0); + BUG_ON(ret); + } p = page_to_pfn(scratch_page); __set_phys_to_machine(pfn, pfn_to_mfn(p)); + + put_balloon_scratch_page(); } - put_balloon_scratch_page(); +#endif balloon_append(pfn_to_page(pfn)); } @@ -627,15 +630,17 @@ static int __init balloon_init(void) if (!xen_domain()) return -ENODEV; - for_each_online_cpu(cpu) - { - per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); - if (per_cpu(balloon_scratch_page, cpu) == NULL) { - pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); - return -ENOMEM; + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + for_each_online_cpu(cpu) + { + per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); + if (per_cpu(balloon_scratch_page, cpu) == NULL) { + pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); + return -ENOMEM; + } } + register_cpu_notifier(&balloon_cpu_notifier); } - register_cpu_notifier(&balloon_cpu_notifier); pr_info("Initialising balloon driver\n"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 02838719..aa846a4 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1176,7 +1176,8 @@ static int gnttab_setup(void) gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes); if (gnttab_shared.addr == NULL) { - pr_warn("Failed to ioremap gnttab share frames!\n"); + pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n", + xen_hvm_resume_frames); return -ENOMEM; } } diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8e74590..569a13b 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma) { struct page **pages = vma->vm_private_data; int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + int rc; if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || 
!pages) return; - xen_unmap_domain_mfn_range(vma, numpgs, pages); - free_xenballooned_pages(numpgs, pages); + rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); + if (rc == 0) + free_xenballooned_pages(numpgs, pages); + else + pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", + numpgs, rc); kfree(pages); } @@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx) int i; for (i = 0; i < ctx->nr_pages; i++) { + struct page *page; pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, page_count(ctx->ring_pages[i])); - put_page(ctx->ring_pages[i]); + page = ctx->ring_pages[i]; + if (!page) + continue; + ctx->ring_pages[i] = NULL; + put_page(page); } put_aio_ring_file(ctx); @@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, unsigned long flags; int rc; + rc = 0; + + /* Make sure the old page hasn't already been changed */ + spin_lock(&mapping->private_lock); + ctx = mapping->private_data; + if (ctx) { + pgoff_t idx; + spin_lock_irqsave(&ctx->completion_lock, flags); + idx = old->index; + if (idx < (pgoff_t)ctx->nr_pages) { + if (ctx->ring_pages[idx] != old) + rc = -EAGAIN; + } else + rc = -EINVAL; + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else + rc = -EINVAL; + spin_unlock(&mapping->private_lock); + + if (rc != 0) + return rc; + /* Writeback must be complete */ BUG_ON(PageWriteback(old)); - put_page(old); + get_page(new); - rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); + rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); if (rc != MIGRATEPAGE_SUCCESS) { - get_page(old); + put_page(new); return rc; } - get_page(new); - /* We can potentially race against kioctx teardown here. Use the * address_space's private data lock to protect the mapping's * private_data. 
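
Note on the fs/aio.c hunks above and below: the ring-page migration path now validates the ring slot both before and after migrate_page_move_mapping(), and only drops the reference on the old page once the new page has actually been installed. A condensed sketch of that validate/migrate/revalidate pattern follows; it is simplified from the patch, the helper name is illustrative only, and the ctx == NULL and migrate_page_copy() handling are omitted for brevity.

	/*
	 * Condensed sketch of the pattern used by aio_migratepage().
	 * Illustrative helper, not the actual kernel function.
	 */
	static int aio_replace_ring_page(struct kioctx *ctx,
					 struct address_space *mapping,
					 struct page *old, struct page *new,
					 enum migrate_mode mode)
	{
		unsigned long flags;
		pgoff_t idx;
		int rc = 0;

		/* 1. Don't bother migrating if the slot no longer holds 'old'. */
		spin_lock(&mapping->private_lock);
		spin_lock_irqsave(&ctx->completion_lock, flags);
		idx = old->index;
		if (idx >= (pgoff_t)ctx->nr_pages || ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
		spin_unlock(&mapping->private_lock);
		if (rc)
			return rc;

		/* 2. Hold a reference on 'new' across the mapping move. */
		get_page(new);
		rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
		if (rc != MIGRATEPAGE_SUCCESS) {
			put_page(new);
			return rc;
		}

		/* 3. Re-validate the slot before installing the new page. */
		spin_lock(&mapping->private_lock);
		spin_lock_irqsave(&ctx->completion_lock, flags);
		if (idx < (pgoff_t)ctx->nr_pages && ctx->ring_pages[idx] == old)
			ctx->ring_pages[idx] = new;
		else
			rc = -EAGAIN;
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
		spin_unlock(&mapping->private_lock);

		/* 4. Drop the reference on whichever page lost the race. */
		if (rc == MIGRATEPAGE_SUCCESS)
			put_page(old);
		else
			put_page(new);

		return rc;
	}

The pre-check avoids touching pages whose kioctx is already being torn down, and taking the reference on the new page before the mapping move keeps the reference counts balanced whichever way the race resolves.
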
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, spin_lock_irqsave(&ctx->completion_lock, flags); migrate_page_copy(new, old); idx = old->index; - if (idx < (pgoff_t)ctx->nr_pages) - ctx->ring_pages[idx] = new; + if (idx < (pgoff_t)ctx->nr_pages) { + /* And only do the move if things haven't changed */ + if (ctx->ring_pages[idx] == old) + ctx->ring_pages[idx] = new; + else + rc = -EAGAIN; + } else + rc = -EINVAL; spin_unlock_irqrestore(&ctx->completion_lock, flags); } else rc = -EBUSY; spin_unlock(&mapping->private_lock); + if (rc == MIGRATEPAGE_SUCCESS) + put_page(old); + else + put_page(new); + return rc; } #endif @@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx) struct aio_ring *ring; unsigned nr_events = ctx->max_reqs; struct mm_struct *mm = current->mm; - unsigned long size, populate; + unsigned long size, unused; int nr_pages; int i; struct file *file; @@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx) return -EAGAIN; } + ctx->aio_ring_file = file; + nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) + / sizeof(struct io_event); + + ctx->ring_pages = ctx->internal_pages; + if (nr_pages > AIO_RING_PAGES) { + ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), + GFP_KERNEL); + if (!ctx->ring_pages) { + put_aio_ring_file(ctx); + return -ENOMEM; + } + } + for (i = 0; i < nr_pages; i++) { struct page *page; page = find_or_create_page(file->f_inode->i_mapping, @@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx) SetPageUptodate(page); SetPageDirty(page); unlock_page(page); + + ctx->ring_pages[i] = page; } - ctx->aio_ring_file = file; - nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) - / sizeof(struct io_event); + ctx->nr_pages = i; - ctx->ring_pages = ctx->internal_pages; - if (nr_pages > AIO_RING_PAGES) { - ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), - GFP_KERNEL); - if (!ctx->ring_pages) { - put_aio_ring_file(ctx); - return -ENOMEM; - } + if (unlikely(i != nr_pages)) { + aio_free_ring(ctx); + return -EAGAIN; } ctx->mmap_size = nr_pages * PAGE_SIZE; @@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx) down_write(&mm->mmap_sem); ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_POPULATE, 0, &populate); + MAP_SHARED, 0, &unused); + up_write(&mm->mmap_sem); if (IS_ERR((void *)ctx->mmap_base)) { - up_write(&mm->mmap_sem); ctx->mmap_size = 0; aio_free_ring(ctx); return -EAGAIN; @@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx) pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); - /* We must do this while still holding mmap_sem for write, as we - * need to be protected against userspace attempting to mremap() - * or munmap() the ring buffer. - */ - ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, - 1, 0, ctx->ring_pages, NULL); - - /* Dropping the reference here is safe as the page cache will hold - * onto the pages for us. It is also required so that page migration - * can unmap the pages and get the right reference count. 
- */ - for (i = 0; i < ctx->nr_pages; i++) - put_page(ctx->ring_pages[i]); - - up_write(&mm->mmap_sem); - - if (unlikely(ctx->nr_pages != nr_pages)) { - aio_free_ring(ctx); - return -EAGAIN; - } - ctx->user_id = ctx->mmap_base; ctx->nr_events = nr_events; /* trusted copy */ @@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) aio_nr += ctx->max_reqs; spin_unlock(&aio_nr_lock); - percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ + percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ + percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ err = ioctx_add_table(ctx, mm); if (err) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 45d98d0..9c01509 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - if (metadata) { - key.objectid = bytenr; - key.type = BTRFS_METADATA_ITEM_KEY; - key.offset = offset; - } else { - key.objectid = bytenr; - key.type = BTRFS_EXTENT_ITEM_KEY; - key.offset = offset; - } - if (!trans) { path->skip_locking = 1; path->search_commit_root = 1; } + +search_again: + key.objectid = bytenr; + key.offset = offset; + if (metadata) + key.type = BTRFS_METADATA_ITEM_KEY; + else + key.type = BTRFS_EXTENT_ITEM_KEY; + again: ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path, 0, 0); @@ -788,7 +787,6 @@ again: goto out_free; if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { - metadata = 0; if (path->slots[0]) { path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, @@ -855,7 +853,7 @@ again: mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); - goto again; + goto search_again; } if (head->extent_op && head->extent_op->update_flags) extent_flags |= head->extent_op->flags_to_set; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a1116225..21da576 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); if (err == -EINTR) - goto out; + goto out_drop_write; dentry = lookup_one_len(vol_args->name, parent, namelen); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); @@ -2284,6 +2284,7 @@ out_dput: dput(dentry); out_unlock_dir: mutex_unlock(&dir->i_mutex); +out_drop_write: mnt_drop_write_file(file); out: kfree(vol_args); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ce459a7..429c73c 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid) root_objectid == BTRFS_CHUNK_TREE_OBJECTID || root_objectid == BTRFS_DEV_TREE_OBJECTID || root_objectid == BTRFS_TREE_LOG_OBJECTID || - root_objectid == BTRFS_CSUM_TREE_OBJECTID) + root_objectid == BTRFS_CSUM_TREE_OBJECTID || + root_objectid == BTRFS_UUID_TREE_OBJECTID || + root_objectid == BTRFS_QUOTA_TREE_OBJECTID) return 1; return 0; } @@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) } /* - * helper to update/delete the 'address of tree root -> reloc tree' + * helper to delete the 'address of tree root -> reloc tree' * mapping */ -static int __update_reloc_root(struct btrfs_root *root, int del) +static void __del_reloc_root(struct btrfs_root *root) { struct rb_node *rb_node; struct mapping_node *node = NULL; @@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del) 
spin_lock(&rc->reloc_root_tree.lock); rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->commit_root->start); + root->node->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); @@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del) spin_unlock(&rc->reloc_root_tree.lock); if (!node) - return 0; + return; BUG_ON((struct btrfs_root *)node->data != root); - if (!del) { - spin_lock(&rc->reloc_root_tree.lock); - node->bytenr = root->node->start; - rb_node = tree_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); - spin_unlock(&rc->reloc_root_tree.lock); - if (rb_node) - backref_tree_panic(rb_node, -EEXIST, node->bytenr); - } else { - spin_lock(&root->fs_info->trans_lock); - list_del_init(&root->root_list); - spin_unlock(&root->fs_info->trans_lock); - kfree(node); + spin_lock(&root->fs_info->trans_lock); + list_del_init(&root->root_list); + spin_unlock(&root->fs_info->trans_lock); + kfree(node); +} + +/* + * helper to update the 'address of tree root -> reloc tree' + * mapping + */ +static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) +{ + struct rb_node *rb_node; + struct mapping_node *node = NULL; + struct reloc_control *rc = root->fs_info->reloc_ctl; + + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, + root->node->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); } + spin_unlock(&rc->reloc_root_tree.lock); + + if (!node) + return 0; + BUG_ON((struct btrfs_root *)node->data != root); + + spin_lock(&rc->reloc_root_tree.lock); + node->bytenr = new_bytenr; + rb_node = tree_insert(&rc->reloc_root_tree.rb_root, + node->bytenr, &node->rb_node); + spin_unlock(&rc->reloc_root_tree.lock); + if (rb_node) + backref_tree_panic(rb_node, -EEXIST, node->bytenr); return 0; } @@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, { struct btrfs_root *reloc_root; struct btrfs_root_item *root_item; - int del = 0; int ret; if (!root->reloc_root) @@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, if (root->fs_info->reloc_ctl->merge_reloc_tree && btrfs_root_refs(root_item) == 0) { root->reloc_root = NULL; - del = 1; + __del_reloc_root(reloc_root); } - __update_reloc_root(reloc_root, del); - if (reloc_root->commit_root != reloc_root->node) { btrfs_set_root_node(root_item, reloc_root->node); free_extent_buffer(reloc_root->commit_root); @@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list) while (!list_empty(list)) { reloc_root = list_entry(list->next, struct btrfs_root, root_list); - __update_reloc_root(reloc_root, 1); + __del_reloc_root(reloc_root); free_extent_buffer(reloc_root->node); free_extent_buffer(reloc_root->commit_root); kfree(reloc_root); @@ -2332,7 +2353,7 @@ again: ret = merge_reloc_root(rc, root); if (ret) { - __update_reloc_root(reloc_root, 1); + __del_reloc_root(reloc_root); free_extent_buffer(reloc_root->node); free_extent_buffer(reloc_root->commit_root); kfree(reloc_root); @@ -2388,6 +2409,13 @@ out: btrfs_std_error(root->fs_info, ret); if (!list_empty(&reloc_roots)) free_reloc_roots(&reloc_roots); + + /* new reloc root may be added */ + mutex_lock(&root->fs_info->reloc_mutex); + list_splice_init(&rc->reloc_roots, &reloc_roots); + mutex_unlock(&root->fs_info->reloc_mutex); + if (!list_empty(&reloc_roots)) + 
free_reloc_roots(&reloc_roots); } BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); @@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, BUG_ON(rc->stage == UPDATE_DATA_PTRS && root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { + if (buf == root->node) + __update_reloc_root(root, cow->start); + } + level = btrfs_header_level(buf); if (btrfs_header_generation(buf) <= btrfs_root_last_snapshot(&root->root_item)) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6837fe8..945d1db 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) } if (!access_ok(VERIFY_READ, arg->clone_sources, - sizeof(*arg->clone_sources * - arg->clone_sources_count))) { + sizeof(*arg->clone_sources) * + arg->clone_sources_count)) { ret = -EFAULT; goto out; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2d8ac1b..d71a11d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) } else { printk(KERN_INFO "btrfs: setting nodatacow\n"); } - info->compress_type = BTRFS_COMPRESS_NONE; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); btrfs_set_opt(info->mount_opt, NODATACOW); @@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_fs_incompat(info, COMPRESS_LZO); } else if (strncmp(args[0].from, "no", 2) == 0) { compress_type = "no"; - info->compress_type = BTRFS_COMPRESS_NONE; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); compress_force = false; @@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); pr_info("btrfs: force %s compression\n", compress_type); - } else + } else if (btrfs_test_opt(root, COMPRESS)) { pr_info("btrfs: use %s compression\n", compress_type); + } break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 1e561c0..ec3ba43 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page) if (err < 0) { SetPageError(page); goto out; - } else if (err < PAGE_CACHE_SIZE) { + } else { + if (err < PAGE_CACHE_SIZE) { /* zero fill remainder of page */ - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, err, PAGE_CACHE_SIZE); + } else { + flush_dcache_page(page); + } } SetPageUptodate(page); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9a8e396..278fd28 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, struct ceph_mds_reply_inode *ininfo; struct ceph_vino vino; struct ceph_fs_client *fsc = ceph_sb_to_client(sb); - int i = 0; int err = 0; dout("fill_trace %p is_dentry %d is_target %d\n", req, @@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, } } + if (rinfo->head->is_target) { + vino.ino = le64_to_cpu(rinfo->targeti.in->ino); + vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + + in = ceph_get_inode(sb, vino); + if (IS_ERR(in)) { + err = PTR_ERR(in); + goto done; + } + req->r_target_inode = in; + + err = fill_inode(in, &rinfo->targeti, NULL, + session, req->r_request_started, + (le32_to_cpu(rinfo->head->result) == 0) ? 
+ req->r_fmode : -1, + &req->r_caps_reservation); + if (err < 0) { + pr_err("fill_inode badness %p %llx.%llx\n", + in, ceph_vinop(in)); + goto done; + } + } + /* * ignore null lease/binding on snapdir ENOENT, or else we * will have trouble splicing in the virtual snapdir later @@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, ceph_dentry(req->r_old_dentry)->offset); dn = req->r_old_dentry; /* use old_dentry */ - in = dn->d_inode; } /* null dentry? */ @@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, } /* attach proper inode */ - ininfo = rinfo->targeti.in; - vino.ino = le64_to_cpu(ininfo->ino); - vino.snap = le64_to_cpu(ininfo->snapid); - in = dn->d_inode; - if (!in) { - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - pr_err("fill_trace bad get_inode " - "%llx.%llx\n", vino.ino, vino.snap); - err = PTR_ERR(in); - d_drop(dn); - goto done; - } + if (!dn->d_inode) { + ihold(in); dn = splice_dentry(dn, in, &have_lease, true); if (IS_ERR(dn)) { err = PTR_ERR(dn); goto done; } req->r_dentry = dn; /* may have spliced */ - ihold(in); - } else if (ceph_ino(in) == vino.ino && - ceph_snap(in) == vino.snap) { - ihold(in); - } else { + } else if (dn->d_inode && dn->d_inode != in) { dout(" %p links to %p %llx.%llx, not %llx.%llx\n", - dn, in, ceph_ino(in), ceph_snap(in), - vino.ino, vino.snap); + dn, dn->d_inode, ceph_vinop(dn->d_inode), + ceph_vinop(in)); have_lease = false; - in = NULL; } if (have_lease) update_dentry_lease(dn, rinfo->dlease, session, req->r_request_started); dout(" final dn %p\n", dn); - i++; - } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || - req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) { + } else if (!req->r_aborted && + (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || + req->r_op == CEPH_MDS_OP_MKSNAP)) { struct dentry *dn = req->r_dentry; /* fill out a snapdir LOOKUPSNAP dentry */ @@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, ininfo = rinfo->targeti.in; vino.ino = le64_to_cpu(ininfo->ino); vino.snap = le64_to_cpu(ininfo->snapid); - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - pr_err("fill_inode get_inode badness %llx.%llx\n", - vino.ino, vino.snap); - err = PTR_ERR(in); - d_delete(dn); - goto done; - } dout(" linking snapped dir %p to dn %p\n", in, dn); + ihold(in); dn = splice_dentry(dn, in, NULL, true); if (IS_ERR(dn)) { err = PTR_ERR(dn); goto done; } req->r_dentry = dn; /* may have spliced */ - ihold(in); - rinfo->head->is_dentry = 1; /* fool notrace handlers */ - } - - if (rinfo->head->is_target) { - vino.ino = le64_to_cpu(rinfo->targeti.in->ino); - vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); - - if (in == NULL || ceph_ino(in) != vino.ino || - ceph_snap(in) != vino.snap) { - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - err = PTR_ERR(in); - goto done; - } - } - req->r_target_inode = in; - - err = fill_inode(in, - &rinfo->targeti, NULL, - session, req->r_request_started, - (le32_to_cpu(rinfo->head->result) == 0) ? 
- req->r_fmode : -1, - &req->r_caps_reservation); - if (err < 0) { - pr_err("fill_inode badness %p %llx.%llx\n", - in, ceph_vinop(in)); - goto done; - } } - done: dout("fill_trace done err=%d\n", err); return err; @@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct qstr dname; struct dentry *dn; struct inode *in; - int err = 0, i; + int err = 0, ret, i; struct inode *snapdir = NULL; struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; struct ceph_dentry_info *di; @@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); } + /* FIXME: release caps/leases if error occurs */ for (i = 0; i < rinfo->dir_nr; i++) { struct ceph_vino vino; @@ -1329,9 +1298,10 @@ retry_lookup: err = -ENOMEM; goto out; } - err = ceph_init_dentry(dn); - if (err < 0) { + ret = ceph_init_dentry(dn); + if (ret < 0) { dput(dn); + err = ret; goto out; } } else if (dn->d_inode && @@ -1351,9 +1321,6 @@ retry_lookup: spin_unlock(&parent->d_lock); } - di = dn->d_fsdata; - di->offset = ceph_make_fpos(frag, i + r_readdir_offset); - /* inode */ if (dn->d_inode) { in = dn->d_inode; @@ -1366,26 +1333,39 @@ retry_lookup: err = PTR_ERR(in); goto out; } - dn = splice_dentry(dn, in, NULL, false); - if (IS_ERR(dn)) - dn = NULL; } if (fill_inode(in, &rinfo->dir_in[i], NULL, session, req->r_request_started, -1, &req->r_caps_reservation) < 0) { pr_err("fill_inode badness on %p\n", in); + if (!dn->d_inode) + iput(in); + d_drop(dn); goto next_item; } - if (dn) - update_dentry_lease(dn, rinfo->dir_dlease[i], - req->r_session, - req->r_request_started); + + if (!dn->d_inode) { + dn = splice_dentry(dn, in, NULL, false); + if (IS_ERR(dn)) { + err = PTR_ERR(dn); + dn = NULL; + goto next_item; + } + } + + di = dn->d_fsdata; + di->offset = ceph_make_fpos(frag, i + r_readdir_offset); + + update_dentry_lease(dn, rinfo->dir_dlease[i], + req->r_session, + req->r_request_started); next_item: if (dn) dput(dn); } - req->r_did_prepopulate = true; + if (err == 0) + req->r_did_prepopulate = true; out: if (snapdir) { diff --git a/fs/dcache.c b/fs/dcache.c index 4bdb300..6055d61 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char if (!tcount) return 0; } - mask = ~(~0ul << tcount*8); + mask = bytemask_from_count(tcount); return unlikely(!!((a ^ b) & mask)); } @@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) * do a "get_unaligned()" if this helps and is sufficiently * fast. * - * - Little-endian machines (so that we can generate the mask - * of low bytes efficiently). Again, we *could* do a byte - * swapping load on big-endian architectures if that is not - * expensive enough to make the optimization worthless. - * * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we * do not trap on the (extremely unlikely) case of a page * crossing operation. 
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len) if (!len) goto done; } - mask = ~(~0ul << len*8); + mask = bytemask_from_count(len); hash += mask & a; done: return fold_hash(hash); diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 9186c7c..b6af150 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void) } static void +nfsd_reply_cache_unhash(struct svc_cacherep *rp) +{ + hlist_del_init(&rp->c_hash); + list_del_init(&rp->c_lru); +} + +static void nfsd_reply_cache_free_locked(struct svc_cacherep *rp) { if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { @@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); if (nfsd_cache_entry_expired(rp) || num_drc_entries >= max_drc_entries) { - lru_put_end(rp); + nfsd_reply_cache_unhash(rp); prune_cache_entries(); goto search_cache; } diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 28955d4..124fc43 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, { struct proc_dir_entry *pde = PDE(file_inode(file)); unsigned long rv = -EIO; - unsigned long (*get_area)(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long) = NULL; + if (use_pde(pde)) { + typeof(proc_reg_get_unmapped_area) *get_area; + + get_area = pde->proc_fops->get_unmapped_area; #ifdef CONFIG_MMU - get_area = current->mm->get_unmapped_area; + if (!get_area) + get_area = current->mm->get_unmapped_area; #endif - if (pde->proc_fops->get_unmapped_area) - get_area = pde->proc_fops->get_unmapped_area; + if (get_area) rv = get_area(file, orig_addr, len, pgoff, flags); + else + rv = orig_addr; unuse_pde(pde); } return rv; diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b8e93a4..78c3c20 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi) pstore_get_records(0); kmsg_dump_register(&pstore_dumper); - pstore_register_console(); - pstore_register_ftrace(); + + if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { + pstore_register_console(); + pstore_register_ftrace(); + } if (pstore_update_ms >= 0) { pstore_timer.expires = jiffies + diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b94f936..35e7d08 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; struct sysfs_open_file *of; - bool has_read, has_write, has_mmap; + bool has_read, has_write; int error = -EACCES; /* need attr_sd for attr and ops, its parent for kobj */ @@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) has_read = battr->read || battr->mmap; has_write = battr->write || battr->mmap; - has_mmap = battr->mmap; } else { const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); @@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) has_read = ops->show; has_write = ops->store; - has_mmap = false; } /* check perms and supported operations */ @@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file) * open file has a separate mutex, it's okay as long as those don't * happen on the same file. At this point, we can't easily give * each file a separate locking class. 
Let's differentiate on - * whether the file has mmap or not for now. + * whether the file is bin or not for now. */ - if (has_mmap) + if (sysfs_is_bin(attr_sd)) mutex_init(&of->mutex); else mutex_init(&of->mutex); diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3ef11b2..3b2c14b 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1635,7 +1635,7 @@ xfs_bmap_last_extent( * blocks at the end of the file which do not start at the previous data block, * we will try to align the new blocks at stripe unit boundaries. * - * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be + * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be * at, or past the EOF. */ STATIC int @@ -1650,9 +1650,14 @@ xfs_bmap_isaeof( bma->aeof = 0; error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, &is_empty); - if (error || is_empty) + if (error) return error; + if (is_empty) { + bma->aeof = 1; + return 0; + } + /* * Check if we are allocation or past the last extent, or at least into * the last delayed allocated extent. @@ -3643,10 +3648,19 @@ xfs_bmap_btalloc( int isaligned; int tryagain; int error; + int stripe_align; ASSERT(ap->length); mp = ap->ip->i_mount; + + /* stripe alignment for allocation is determined by mount parameters */ + stripe_align = 0; + if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) + stripe_align = mp->m_swidth; + else if (mp->m_dalign) + stripe_align = mp->m_dalign; + align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; if (unlikely(align)) { error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, @@ -3655,6 +3669,8 @@ xfs_bmap_btalloc( ASSERT(!error); ASSERT(ap->length); } + + nullfb = *ap->firstblock == NULLFSBLOCK; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); if (nullfb) { @@ -3730,7 +3746,7 @@ xfs_bmap_btalloc( */ if (!ap->flist->xbf_low && ap->aeof) { if (!ap->offset) { - args.alignment = mp->m_dalign; + args.alignment = stripe_align; atype = args.type; isaligned = 1; /* @@ -3755,13 +3771,13 @@ xfs_bmap_btalloc( * of minlen+alignment+slop doesn't go up * between the calls. 
*/ - if (blen > mp->m_dalign && blen <= args.maxlen) - nextminlen = blen - mp->m_dalign; + if (blen > stripe_align && blen <= args.maxlen) + nextminlen = blen - stripe_align; else nextminlen = args.minlen; - if (nextminlen + mp->m_dalign > args.minlen + 1) + if (nextminlen + stripe_align > args.minlen + 1) args.minalignslop = - nextminlen + mp->m_dalign - + nextminlen + stripe_align - args.minlen - 1; else args.minalignslop = 0; @@ -3783,7 +3799,7 @@ xfs_bmap_btalloc( */ args.type = atype; args.fsbno = ap->blkno; - args.alignment = mp->m_dalign; + args.alignment = stripe_align; args.minlen = nextminlen; args.minalignslop = 0; isaligned = 1; diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 5887e41..1394106 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes( XFS_BUF_UNWRITE(bp); XFS_BUF_READ(bp); XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); - xfsbdstrat(mp, bp); + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + break; + } + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, @@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes( XFS_BUF_UNDONE(bp); XFS_BUF_UNREAD(bp); XFS_BUF_WRITE(bp); - xfsbdstrat(mp, bp); + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + break; + } + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77..afe7645 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -698,7 +698,11 @@ xfs_buf_read_uncached( bp->b_flags |= XBF_READ; bp->b_ops = ops; - xfsbdstrat(target->bt_mount, bp); + if (XFS_FORCED_SHUTDOWN(target->bt_mount)) { + xfs_buf_relse(bp); + return NULL; + } + xfs_buf_iorequest(bp); xfs_buf_iowait(bp); return bp; } @@ -1089,7 +1093,7 @@ xfs_bioerror( * This is meant for userdata errors; metadata bufs come with * iodone functions attached, so that we can track down errors. */ -STATIC int +int xfs_bioerror_relse( struct xfs_buf *bp) { @@ -1152,7 +1156,7 @@ xfs_bwrite( ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_WRITE; - bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); + bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL); xfs_bdstrat_cb(bp); @@ -1164,25 +1168,6 @@ xfs_bwrite( return error; } -/* - * Wrapper around bdstrat so that we can stop data from going to disk in case - * we are shutting down the filesystem. Typically user data goes thru this - * path; one of the exceptions is the superblock. 
- */ -void -xfsbdstrat( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - if (XFS_FORCED_SHUTDOWN(mp)) { - trace_xfs_bdstrat_shut(bp, _RET_IP_); - xfs_bioerror_relse(bp); - return; - } - - xfs_buf_iorequest(bp); -} - STATIC void _xfs_buf_ioend( xfs_buf_t *bp, @@ -1516,6 +1501,12 @@ xfs_wait_buftarg( struct xfs_buf *bp; bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); + if (bp->b_flags & XBF_WRITE_FAIL) { + xfs_alert(btp->bt_mount, +"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n" +"Please run xfs_repair to determine the extent of the problem.", + (long long)bp->b_bn); + } xfs_buf_rele(bp); } if (loop++ != 0) @@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit( blk_start_plug(&plug); list_for_each_entry_safe(bp, n, io_list, b_list) { - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); + bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); bp->b_flags |= XBF_WRITE; if (!wait) { diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index e656833..1cf21a4 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -45,6 +45,7 @@ typedef enum { #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ +#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */ /* I/O hints for the BIO layer */ #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ @@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t; { XBF_ASYNC, "ASYNC" }, \ { XBF_DONE, "DONE" }, \ { XBF_STALE, "STALE" }, \ + { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ { XBF_SYNCIO, "SYNCIO" }, \ { XBF_FUA, "FUA" }, \ { XBF_FLUSH, "FLUSH" }, \ @@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t; { _XBF_DELWRI_Q, "DELWRI_Q" }, \ { _XBF_COMPOUND, "COMPOUND" } + /* * Internal state flags. */ @@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *); /* Buffer Read and Write Routines */ extern int xfs_bwrite(struct xfs_buf *bp); - -extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); - extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int); extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); @@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, #define xfs_buf_zero(bp, off, len) \ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) +extern int xfs_bioerror_relse(struct xfs_buf *); + static inline int xfs_buf_geterror(xfs_buf_t *bp) { return bp ? bp->b_error : ENOMEM; @@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void); #define XFS_BUF_ZEROFLAGS(bp) \ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ - XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) + XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \ + XBF_WRITE_FAIL)) void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a64f67b..2227b9b 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -496,6 +496,14 @@ xfs_buf_item_unpin( } } +/* + * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30 + * seconds so as to not spam logs too much on repeated detection of the same + * buffer being bad.. + */ + +DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10); + STATIC uint xfs_buf_item_push( struct xfs_log_item *lip, @@ -524,6 +532,14 @@ xfs_buf_item_push( trace_xfs_buf_item_push(bip); + /* has a previous flush failed due to IO errors? 
*/ + if ((bp->b_flags & XBF_WRITE_FAIL) && + ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { + xfs_warn(bp->b_target->bt_mount, +"Detected failing async write on buffer block 0x%llx. Retrying async write.\n", + (long long)bp->b_bn); + } + if (!xfs_buf_delwri_queue(bp, buffer_list)) rval = XFS_ITEM_FLUSHING; xfs_buf_unlock(bp); @@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks( xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ - if (!XFS_BUF_ISSTALE(bp)) { - bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; + if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { + bp->b_flags |= XBF_WRITE | XBF_ASYNC | + XBF_DONE | XBF_WRITE_FAIL; xfs_buf_iorequest(bp); } else { xfs_buf_relse(bp); diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 56369d4..48c7d18 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup( */ int /* error */ xfs_dir2_node_removename( - xfs_da_args_t *args) /* operation arguments */ + struct xfs_da_args *args) /* operation arguments */ { - xfs_da_state_blk_t *blk; /* leaf block */ + struct xfs_da_state_blk *blk; /* leaf block */ int error; /* error return value */ int rval; /* operation return value */ - xfs_da_state_t *state; /* btree cursor */ + struct xfs_da_state *state; /* btree cursor */ trace_xfs_dir2_node_removename(args); @@ -2084,19 +2084,18 @@ xfs_dir2_node_removename( state->mp = args->dp->i_mount; state->blocksize = state->mp->m_dirblksize; state->node_ents = state->mp->m_dir_node_ents; - /* - * Look up the entry we're deleting, set up the cursor. - */ + + /* Look up the entry we're deleting, set up the cursor. */ error = xfs_da3_node_lookup_int(state, &rval); if (error) - rval = error; - /* - * Didn't find it, upper layer screwed up. - */ + goto out_free; + + /* Didn't find it, upper layer screwed up. */ if (rval != EEXIST) { - xfs_da_state_free(state); - return rval; + error = rval; + goto out_free; } + blk = &state->path.blk[state->path.active - 1]; ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); ASSERT(state->extravalid); @@ -2107,7 +2106,7 @@ xfs_dir2_node_removename( error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, &state->extrablk, &rval); if (error) - return error; + goto out_free; /* * Fix the hash values up the btree. */ @@ -2122,6 +2121,7 @@ xfs_dir2_node_removename( */ if (!error) error = xfs_dir2_node_to_leaf(state); +out_free: xfs_da_state_free(state); return error; } diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 8367d6d..4f11ef0 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -157,7 +157,7 @@ xfs_ioc_trim( struct xfs_mount *mp, struct fstrim_range __user *urange) { - struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); unsigned int granularity = q->limits.discard_granularity; struct fstrim_range range; xfs_daddr_t start, end, minlen; @@ -180,7 +180,8 @@ xfs_ioc_trim( * matter as trimming blocks is an advisory interface. 
*/ if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || - range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) + range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) || + range.len < mp->m_sb.sb_blocksize) return -XFS_ERROR(EINVAL); start = BTOBB(range.start); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a6e54b3..02fb943 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -220,6 +220,8 @@ xfs_growfs_data_private( */ nfree = 0; for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { + __be32 *agfl_bno; + /* * AG freespace header block */ @@ -279,8 +281,10 @@ xfs_growfs_data_private( agfl->agfl_seqno = cpu_to_be32(agno); uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); } + + agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) - agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); + agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); error = xfs_bwrite(bp); xfs_buf_relse(bp); diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 4d61340..33ad9a7 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -442,7 +442,8 @@ xfs_attrlist_by_handle( return -XFS_ERROR(EPERM); if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) return -XFS_ERROR(EFAULT); - if (al_hreq.buflen > XATTR_LIST_MAX) + if (al_hreq.buflen < sizeof(struct attrlist) || + al_hreq.buflen > XATTR_LIST_MAX) return -XFS_ERROR(EINVAL); /* diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index e8fb123..a7992f8 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle( if (copy_from_user(&al_hreq, arg, sizeof(compat_xfs_fsop_attrlist_handlereq_t))) return -XFS_ERROR(EFAULT); - if (al_hreq.buflen > XATTR_LIST_MAX) + if (al_hreq.buflen < sizeof(struct attrlist) || + al_hreq.buflen > XATTR_LIST_MAX) return -XFS_ERROR(EINVAL); /* diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 27e0e54..104455b 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -618,7 +618,8 @@ xfs_setattr_nonsize( } if (!gid_eq(igid, gid)) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { - ASSERT(!XFS_IS_PQUOTA_ON(mp)); + ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) || + !XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b6b669d..eae1692 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -193,7 +193,10 @@ xlog_bread_noalign( bp->b_io_length = nbblks; bp->b_error = 0; - xfsbdstrat(log->l_mp, bp); + if (XFS_FORCED_SHUTDOWN(log->l_mp)) + return XFS_ERROR(EIO); + + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) xfs_buf_ioerror_alert(bp, __func__); @@ -4397,7 +4400,13 @@ xlog_do_recover( XFS_BUF_READ(bp); XFS_BUF_UNASYNC(bp); bp->b_ops = &xfs_sb_buf_ops; - xfsbdstrat(log->l_mp, bp); + + if (XFS_FORCED_SHUTDOWN(log->l_mp)) { + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, __func__); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 14a4996..dd88f0e 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -134,8 +134,6 @@ xfs_qm_dqpurge( { struct xfs_mount *mp = dqp->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; - struct xfs_dquot *gdqp = NULL; - struct xfs_dquot *pdqp = NULL; xfs_dqlock(dqp); if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { @@ -143,21 +141,6 @@ xfs_qm_dqpurge( return EAGAIN; } - /* - * If this 
quota has a hint attached, prepare for releasing it now. - */ - gdqp = dqp->q_gdquot; - if (gdqp) { - xfs_dqlock(gdqp); - dqp->q_gdquot = NULL; - } - - pdqp = dqp->q_pdquot; - if (pdqp) { - xfs_dqlock(pdqp); - dqp->q_pdquot = NULL; - } - dqp->dq_flags |= XFS_DQ_FREEING; xfs_dqflock(dqp); @@ -206,11 +189,47 @@ xfs_qm_dqpurge( XFS_STATS_DEC(xs_qm_dquot_unused); xfs_qm_dqdestroy(dqp); + return 0; +} + +/* + * Release the group or project dquot pointers the user dquots maybe carrying + * around as a hint, and proceed to purge the user dquot cache if requested. +*/ +STATIC int +xfs_qm_dqpurge_hints( + struct xfs_dquot *dqp, + void *data) +{ + struct xfs_dquot *gdqp = NULL; + struct xfs_dquot *pdqp = NULL; + uint flags = *((uint *)data); + + xfs_dqlock(dqp); + if (dqp->dq_flags & XFS_DQ_FREEING) { + xfs_dqunlock(dqp); + return EAGAIN; + } + + /* If this quota has a hint attached, prepare for releasing it now */ + gdqp = dqp->q_gdquot; + if (gdqp) + dqp->q_gdquot = NULL; + + pdqp = dqp->q_pdquot; + if (pdqp) + dqp->q_pdquot = NULL; + + xfs_dqunlock(dqp); if (gdqp) - xfs_qm_dqput(gdqp); + xfs_qm_dqrele(gdqp); if (pdqp) - xfs_qm_dqput(pdqp); + xfs_qm_dqrele(pdqp); + + if (flags & XFS_QMOPT_UQUOTA) + return xfs_qm_dqpurge(dqp, NULL); + return 0; } @@ -222,8 +241,18 @@ xfs_qm_dqpurge_all( struct xfs_mount *mp, uint flags) { - if (flags & XFS_QMOPT_UQUOTA) - xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); + /* + * We have to release group/project dquot hint(s) from the user dquot + * at first if they are there, otherwise we would run into an infinite + * loop while walking through radix tree to purge other type of dquots + * since their refcount is not zero if the user dquot refers to them + * as hint. + * + * Call the special xfs_qm_dqpurge_hints() will end up go through the + * general xfs_qm_dqpurge() against user dquot cache if requested. + */ + xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags); + if (flags & XFS_QMOPT_GQUOTA) xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); if (flags & XFS_QMOPT_PQUOTA) @@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - if (udqp) { + if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); - ASSERT(XFS_IS_UQUOTA_ON(mp)); ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); ip->i_udquot = xfs_qm_dqhold(udqp); xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); } - if (gdqp) { + if (gdqp && XFS_IS_GQUOTA_ON(mp)) { ASSERT(ip->i_gdquot == NULL); - ASSERT(XFS_IS_GQUOTA_ON(mp)); ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); ip->i_gdquot = xfs_qm_dqhold(gdqp); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); } - if (pdqp) { + if (pdqp && XFS_IS_PQUOTA_ON(mp)) { ASSERT(ip->i_pdquot == NULL); - ASSERT(XFS_IS_PQUOTA_ON(mp)); ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); ip->i_pdquot = xfs_qm_dqhold(pdqp); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index c035d11..647b6f1 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -314,7 +314,18 @@ xfs_trans_read_buf_map( ASSERT(bp->b_iodone == NULL); XFS_BUF_READ(bp); bp->b_ops = ops; - xfsbdstrat(tp->t_mountp, bp); + + /* + * XXX(hch): clean up the error handling here to be less + * of a mess.. 
+ */ + if (XFS_FORCED_SHUTDOWN(mp)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + xfs_bioerror_relse(bp); + } else { + xfs_buf_iorequest(bp); + } + error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, __func__); diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28..db09234 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif #ifndef pte_accessible -# define pte_accessible(pte) ((void)(pte),1) +# define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif - if (pmd_none(pmdval)) + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { - if (!pmd_trans_huge(pmdval)) - pmd_clear_bad(pmd); + pmd_clear_bad(pmd); return 1; } return 0; diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b42..1cd3f5d 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -3,13 +3,11 @@ #include <linux/thread_info.h> -/* - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users - * that think a non-zero value indicates we cannot preempt. - */ +#define PREEMPT_ENABLED (0) + static __always_inline int preempt_count(void) { - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; + return current_thread_info()->preempt_count; } static __always_inline int *preempt_count_ptr(void) @@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) return ¤t_thread_info()->preempt_count; } -/* - * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the - * alternative is loosing a reschedule. Better schedule too often -- also this - * should be a very rare operation. - */ static __always_inline void preempt_count_set(int pc) { *preempt_count_ptr() = pc; @@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) -/* - * We fold the NEED_RESCHED bit into the preempt count such that - * preempt_enable() can decrement and test for needing to reschedule with a - * single instruction. - * - * We invert the actual bit, so that when the decrement hits 0 we know we both - * need to resched (the bit is cleared) and can resched (no preempt count). - */ - static __always_inline void set_preempt_need_resched(void) { - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; } static __always_inline void clear_preempt_need_resched(void) { - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; } static __always_inline bool test_preempt_need_resched(void) { - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); + return false; } /* @@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) static __always_inline bool __preempt_count_dec_and_test(void) { - return !--*preempt_count_ptr(); + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. 
+ */ + return !--*preempt_count_ptr() && tif_need_resched(); } /* @@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) */ static __always_inline bool should_resched(void) { - return unlikely(!*preempt_count_ptr()); + return unlikely(!preempt_count() && tif_need_resched()); } #ifdef CONFIG_PREEMPT diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 3f21f1b..d3909ef 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +#ifndef zero_bytemask +#ifdef CONFIG_64BIT +#define zero_bytemask(mask) (~0ul << fls64(mask)) +#else +#define zero_bytemask(mask) (~0ul << fls(mask)) +#endif /* CONFIG_64BIT */ +#endif /* zero_bytemask */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h index 9a193b8..a89df3b 100644 --- a/include/linux/assoc_array.h +++ b/include/linux/assoc_array.h @@ -41,10 +41,10 @@ struct assoc_array_ops { /* Is this the object we're looking for? */ bool (*compare_object)(const void *object, const void *index_key); - /* How different are two objects, to a bit position in their keys? (or - * -1 if they're the same) + /* How different is an object from an index key, to a bit position in + * their keys? (or -1 if they're the same) */ - int (*diff_objects)(const void *a, const void *b); + int (*diff_objects)(const void *object, const void *index_key); /* Method to free an object. */ void (*free_object)(void *object); diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 973ce10..dc1bd3d 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -28,8 +28,6 @@ #endif -#define uninitialized_var(x) x - #ifndef __HAVE_BUILTIN_BSWAP16__ /* icc has this, but it's called _bswap16 */ #define __HAVE_BUILTIN_BSWAP16__ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 57e87e7..bf72e9a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -29,8 +29,10 @@ struct vfsmount; /* The hash is always the low bits of hash_len */ #ifdef __LITTLE_ENDIAN #define HASH_LEN_DECLARE u32 hash; u32 len; + #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) #else #define HASH_LEN_DECLARE u32 len; u32 hash; + #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) #endif /* diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 9649ff0..bd7e987 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -142,7 +142,10 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page) return 0; } -#define isolate_huge_page(p, l) false +static inline bool isolate_huge_page(struct page *page, struct list_head *list) +{ + return false; +} #define putback_active_hugepage(p) do {} while (0) #define is_hugepage_active(x) false diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5d89d1b..c56c350 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -4,6 +4,7 @@ #include <uapi/linux/ipv6.h> #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) /* * This structure contains configuration options per IPv6 link. */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d4e98d1..ecb8754 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -193,7 +193,8 @@ extern int _cond_resched(void); (__x < 0) ? 
-__x : __x; \ }) -#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) +#if defined(CONFIG_MMU) && \ + (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) void might_fault(void); #else static inline void might_fault(void) { } diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d78d28a..5fd33dc 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; extern size_t vmcoreinfo_size; extern size_t vmcoreinfo_max_size; +/* flag to track if kexec reboot is in progress */ +extern bool kexec_in_progress; + int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3..4bfde0e 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -19,7 +19,7 @@ #define USE_CMPXCHG_LOCKREF \ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ - IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) + IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) struct lockref { union { diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5..c45c089 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = ((u64)al * mul) >> shift; + if (ah) + ret += ((u64)ah * mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#endif + #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 2d0c907..cab2dd2 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -39,7 +39,8 @@ enum sec_device_type { struct sec_pmic_dev { struct device *dev; struct sec_platform_data *pdata; - struct regmap *regmap; + struct regmap *regmap_pmic; + struct regmap *regmap_rtc; struct i2c_client *i2c; struct i2c_client *rtc; diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index ad05ce6..2e5b194 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -22,6 +22,8 @@ #define PHY_ID_KSZ8021 0x00221555 #define PHY_ID_KSZ8031 0x00221556 #define PHY_ID_KSZ8041 0x00221510 +/* undocumented */ +#define PHY_ID_KSZ8041RNLI 0x00221537 #define PHY_ID_KSZ8051 0x00221550 /* same id: ks8001 Rev. A/B, and ks8721 Rev 3. 
*/ #define PHY_ID_KSZ8001 0x0022161A diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b5..f015c05 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode); + struct buffer_head *head, enum migrate_mode mode, + int extra_count); #else static inline void putback_lru_pages(struct list_head *l) {} @@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); +extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); extern bool migrate_ratelimited(int node); #else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} +static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ +} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd00..3552717 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page *page); @@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return page->ptl; } -#else /* BLOATED_SPINLOCKS */ +#else /* ALLOC_SPLIT_PTLOCKS */ static inline bool ptlock_alloc(struct page *page) { return true; @@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return &page->ptl; } -#endif /* BLOATED_SPINLOCKS */ +#endif /* ALLOC_SPLIT_PTLOCKS */ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd29941..290901a 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -26,6 +26,7 @@ struct address_space; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* * Each physical page in the system has a struct page associated with @@ -155,7 +156,7 @@ struct page { * system if PG_buddy is set. */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; #else spinlock_t ptl; @@ -443,6 +444,14 @@ struct mm_struct { /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; #endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + /* + * An operation with batched TLB flushing is going on. Anything that + * can move process memory needs to flush the TLB when moving a + * PROT_NONE or PROT_NUMA mapped page. 
+ */ + bool tlb_flush_pending; +#endif struct uprobes_state uprobes_state; }; @@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return mm->cpu_vm_mask_var; } +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +/* + * Memory barriers to keep this state in sync are graciously provided by + * the page table locks, outside of which no page table modifications happen. + * The barriers below prevent the compiler from re-ordering the instructions + * around the memory barriers that are already present in the code. + */ +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + return mm->tlb_flush_pending; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ + mm->tlb_flush_pending = true; + + /* + * Guarantee that the tlb_flush_pending store does not leak into the + * critical section updating the page tables + */ + smp_mb__before_spinlock(); +} +/* Clearing is done after a TLB flush, which also provides a barrier. */ +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + mm->tlb_flush_pending = false; +} +#else +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + return false; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ +} +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ +} +#endif + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/net.h b/include/linux/net.h index 4bcee94..69be3e6 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -181,7 +181,7 @@ struct proto_ops { int offset, size_t size, int flags); ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); - void (*set_peek_off)(struct sock *sk, int val); + int (*set_peek_off)(struct sock *sk, int val); }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7f0ed42..d9a550b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1255,7 +1255,7 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ - unsigned char neigh_priv_len; + unsigned short neigh_priv_len; unsigned short dev_id; /* Used to differentiate devices * that share the same link * layer address diff --git a/include/linux/pci.h b/include/linux/pci.h index 1084a15..a13d682 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -960,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); int pci_select_bars(struct pci_dev *dev, unsigned long flags); +bool pci_device_is_present(struct pci_dev *pdev); /* ROM control related routines */ int pci_enable_rom(struct pci_dev *pdev); @@ -1567,65 +1568,65 @@ enum pci_fixup_pass { /* Anonymous variables would be nice... 
*/ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ class_shift, hook) \ - static const struct pci_fixup __pci_fixup_##name __used \ + static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ __attribute__((__section__(#section), aligned((sizeof(void *))))) \ = { vendor, device, class, class_shift, hook }; #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##vendor##device##hook, vendor, device, class, \ + resume##hook, vendor, device, class, \ class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##vendor##device##hook, vendor, device, \ + resume_early##hook, vendor, device, \ class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##vendor##device##hook, vendor, device, class, \ + suspend##hook, vendor, device, class, \ class_shift, hook) #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##vendor##device##hook, vendor, device, \ + resume##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##vendor##device##hook, vendor, device, \ + resume_early##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##vendor##device##hook, vendor, device, \ + 
suspend##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #ifdef CONFIG_PCI_QUIRKS diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d..ece0c6b 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -51,6 +51,7 @@ struct pstore_info { char *buf; size_t bufsize; struct mutex read_mutex; /* serialize open/read/close */ + int flags; int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, @@ -70,6 +71,8 @@ struct pstore_info { void *data; }; +#define PSTORE_FLAGS_FRAGILE 1 + #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f..9e7db9e 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); * Architecture-specific implementations of sys_reboot commands. */ +extern void migrate_to_reboot_cpu(void); extern void machine_restart(char *cmd); extern void machine_halt(void); extern void machine_power_off(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index 768b037..53f97eb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -440,8 +440,6 @@ struct task_cputime { .sum_exec_runtime = 0, \ } -#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) - #ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) #else @@ -932,7 +930,8 @@ struct pipe_inode_info; struct uts_namespace; struct load_weight { - unsigned long weight, inv_weight; + unsigned long weight; + u32 inv_weight; }; struct sched_avg { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 30aa0dc..9d55438 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -47,6 +47,8 @@ extern int shmem_init(void); extern int shmem_fill_super(struct super_block *sb, void *data, int silent); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); +extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, + unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern void shmem_unlock_mapping(struct address_space *mapping); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bec1cc7..215b5ea 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2263,6 +2263,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. 
+ */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __pskb_trim(skb, len); +} + #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -2360,27 +2378,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); -/** - * pskb_trim_rcsum - trim received skb and update checksum - * @skb: buffer to trim - * @len: new length - * - * This is exactly the same as pskb_trim except that it ensures the - * checksum of received packets are still valid after the operation. - */ - -static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) -{ - if (likely(len >= skb->len)) - return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) { - __wsum adj = skb_checksum(skb, len, skb->len - len, 0); - - skb->csum = csum_sub(skb->csum, adj); - } - return __pskb_trim(skb, len); -} - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) { diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index bd8218b..941055e 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -83,7 +83,7 @@ struct vb2_fileio_data; struct vb2_mem_ops { void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); void (*put)(void *buf_priv); - struct dma_buf *(*get_dmabuf)(void *buf_priv); + struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index eb198ac..488316e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -110,7 +110,8 @@ struct frag_hdr { __be32 identification; }; -#define IP6_MF 0x0001 +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 #include <net/sock.h> diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ea0ca5f..67b5d00 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1726,12 +1726,6 @@ struct sctp_association { /* How many duplicated TSNs have we seen? */ int numduptsns; - /* Number of seconds of idle time before an association is closed. - * In the association context, this is really used as a boolean - * since the real timeout is stored in the timeouts array - */ - __u32 autoclose; - /* These are to support * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses * and Enforcement of Flow and Message Limits" diff --git a/include/net/sock.h b/include/net/sock.h index e3a18ff..2ef3c3e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1035,7 +1035,6 @@ enum cg_proto_flags { }; struct cg_proto { - void (*enter_memory_pressure)(struct sock *sk); struct res_counter memory_allocated; /* Current allocated memory. */ struct percpu_counter sockets_allocated; /* Current number of sockets. 
*/ int memory_pressure; @@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk) struct proto *prot = sk->sk_prot; for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - if (cg_proto->memory_pressure) - cg_proto->memory_pressure = 0; + cg_proto->memory_pressure = 0; } } @@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk) struct proto *prot = sk->sk_prot; for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - cg_proto->enter_memory_pressure(sk); + cg_proto->memory_pressure = 1; } sk->sk_prot->enter_memory_pressure(sk); diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index af99839..5f73785 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, { struct snd_sg_buf *sgbuf = dmab->private_data; dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; - addr &= PAGE_MASK; + addr &= ~((dma_addr_t)PAGE_SIZE - 1); return addr + offset % PAGE_SIZE; } diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6..321301c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -517,10 +517,6 @@ struct se_node_acl { u32 acl_index; #define MAX_ACL_TAG_SIZE 64 char acl_tag[MAX_ACL_TAG_SIZE]; - u64 num_cmds; - u64 read_bytes; - u64 write_bytes; - spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; struct se_dev_entry **device_list; @@ -624,6 +620,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; }; diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912..f854ca4 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -75,6 +75,7 @@ #define DRM_VMW_PARAM_FIFO_CAPS 4 #define DRM_VMW_PARAM_MAX_FB_SIZE 5 #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 +#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 /** * struct drm_vmw_getparam_arg diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6..959d454 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -679,6 +679,7 @@ enum perf_event_type { * * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC + * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * }; */ PERF_RECORD_SAMPLE = 9, diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index d630163..5759810 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -30,7 +30,7 @@ #include <sound/compress_params.h> -#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) +#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) /** * struct snd_compressed_buffer: compressed buffer * @fragment_size: size of buffer fragment in bytes @@ -67,8 +67,8 @@ struct snd_compr_params { struct snd_compr_tstamp { __u32 byte_offset; __u32 copied_total; - snd_pcm_uframes_t pcm_frames; - snd_pcm_uframes_t pcm_io_frames; + __u32 pcm_frames; + __u32 pcm_io_frames; __u32 sampling_rate; }; diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e1209..ae665ac 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { struct blkif_request_rw { 
uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -163,7 +163,7 @@ struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -175,7 +175,7 @@ struct blkif_request_discard { struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -184,7 +184,7 @@ struct blkif_request_other { struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; @@ -192,7 +192,7 @@ struct blkif_request_indirect { blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ diff --git a/init/Kconfig b/init/Kconfig index 79383d3..4e5d96a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK config ARCH_SUPPORTS_NUMA_BALANCING bool +# +# For architectures that know their GCC __int128 support is sound +# +config ARCH_SUPPORTS_INT128 + bool + # For architectures that (ab)use NUMA to represent different memory regions # all cpu-local but of different latencies, such as SuperH. 
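The new ARCH_SUPPORTS_INT128 symbol above is an opt-in for architectures whose GCC __int128 support is known to be sound; helpers such as the mul_u64_u32_shr() used by the __calc_delta() rewrite in kernel/sched/fair.c further down can then take a single wide-multiply fast path (an assumption about that helper's internals, not spelled out in this hunk). A minimal user-space model of such a helper, assuming the semantics (a * mul) >> shift with shift <= 32, might look like the following; it is illustrative only, not the kernel's implementation:

#include <stdint.h>

static inline uint64_t mul_u64_u32_shr_model(uint64_t a, uint32_t mul,
					     unsigned int shift)
{
#if defined(__SIZEOF_INT128__)
	/* Wide-multiply fast path: one 64x32->96-bit product, one shift. */
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
#else
	/*
	 * Fallback: split a into 32-bit halves.  Exact as long as
	 * shift <= 32 (the high half's contribution is then a multiple
	 * of 2^shift) and the final result fits in 64 bits.
	 */
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint64_t ret = ((uint64_t)al * mul) >> shift;

	if (ah)
		ret += (uint64_t)ah * mul << (32 - shift);
	return ret;
#endif
}
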
# diff --git a/kernel/.gitignore b/kernel/.gitignore index b3097bd..790d83c 100644 --- a/kernel/.gitignore +++ b/kernel/.gitignore @@ -5,3 +5,4 @@ config_data.h config_data.gz timeconst.h hz.bc +x509_certificate_list diff --git a/kernel/Makefile b/kernel/Makefile index bbaf7d5..bc010ee 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE ############################################################################### ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) -X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 -X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ +X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509 +X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ $(or $(realpath $(CERT)),$(CERT)))) +X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw)) ifeq ($(X509_CERTIFICATES),) $(warning *** No X.509 certificates found ***) @@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list targets += $(obj)/.x509.list $(obj)/.x509.list: @echo $(X509_CERTIFICATES) >$@ +endif clean-files := x509_certificate_list .x509.list -endif ifeq ($(CONFIG_MODULE_SIG),y) ############################################################################### diff --git a/kernel/bounds.c b/kernel/bounds.c index 5253204..9fd4246 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -22,6 +22,6 @@ void foo(void) #ifdef CONFIG_SMP DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); #endif - DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); + DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); /* End of constants */ } diff --git a/kernel/events/core.c b/kernel/events/core.c index 72348dc..f574401 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event, if (event->state != PERF_EVENT_STATE_ACTIVE) return; + perf_pmu_disable(event->pmu); + event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; @@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event, ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; + + perf_pmu_enable(event->pmu); } static void @@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx) { u64 tstamp = perf_event_time(event); + int ret = 0; if (event->state <= PERF_EVENT_STATE_OFF) return 0; @@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event, */ smp_wmb(); + perf_pmu_disable(event->pmu); + if (event->pmu->add(event, PERF_EF_START)) { event->state = PERF_EVENT_STATE_INACTIVE; event->oncpu = -1; - return -EAGAIN; + ret = -EAGAIN; + goto out; } event->tstamp_running += tstamp - event->tstamp_stopped; @@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event, if (event->attr.exclusive) cpuctx->exclusive = 1; - return 0; +out: + perf_pmu_enable(event->pmu); + + return ret; } static int @@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, if (!event_filter_match(event)) continue; + perf_pmu_disable(event->pmu); + hwc = &event->hw; if (hwc->interrupts == MAX_INTERRUPTS) { @@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, } if (!event->attr.freq || !event->attr.sample_freq) - continue; + goto next; /* * stop the event and update event->count @@ -2774,6 
+2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, perf_adjust_period(event, period, delta, false); event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); + next: + perf_pmu_enable(event->pmu); } perf_pmu_enable(ctx->pmu); diff --git a/kernel/fork.c b/kernel/fork.c index 728d5be..5721f0e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) spin_lock_init(&mm->page_table_lock); mm_init_aio(mm); mm_init_owner(mm, p); + clear_tlb_flush_pending(mm); if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; diff --git a/kernel/futex.c b/kernel/futex.c index 80ba086..f6ff019 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) return -EINVAL; address -= key->both.offset; + if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) + return -EFAULT; + /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs @@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) * but access_ok() should be faster than find_vma() */ if (!fshared) { - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) - return -EFAULT; key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); @@ -288,7 +289,7 @@ again: put_page(page); /* serialize against __split_huge_page_splitting() */ local_irq_disable(); - if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { + if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) { page_head = compound_head(page); /* * page_head is valid pointer but we must pin diff --git a/kernel/kexec.c b/kernel/kexec.c index 490afc0..9c97016 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; size_t vmcoreinfo_size; size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); +/* Flag to indicate we are going to kexec a new kernel */ +bool kexec_in_progress = false; + /* Location of the reserved area for the crash kernel */ struct resource crashk_res = { .name = "Crash kernel", @@ -1675,7 +1678,9 @@ int kernel_kexec(void) } else #endif { + kexec_in_progress = true; kernel_restart_prepare(NULL); + migrate_to_reboot_cpu(); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); } diff --git a/kernel/reboot.c b/kernel/reboot.c index f813b34..662c83f 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(unregister_reboot_notifier); -static void migrate_to_reboot_cpu(void) +void migrate_to_reboot_cpu(void) { /* The boot cpu is always logical cpu 0 */ int cpu = reboot_cpu; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e85cda2..a88f4a4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym); static void update_top_cache_domain(int cpu) { struct sched_domain *sd; + struct sched_domain *busy_sd = NULL; int id = cpu; int size = 1; @@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu) if (sd) { id = cpumask_first(sched_domain_span(sd)); size = cpumask_weight(sched_domain_span(sd)); - sd = sd->parent; /* sd_busy */ + busy_sd = sd->parent; /* sd_busy */ } - rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); + rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); per_cpu(sd_llc_size, cpu) = size; @@ 
-5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) * die on a /0 trap. */ sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); + sg->sgp->power_orig = sg->sgp->power; /* * Make sure the first group of this domain contains the diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fd773ad..c7395d9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -178,59 +178,61 @@ void sched_init_granularity(void) update_sysctl(); } -#if BITS_PER_LONG == 32 -# define WMULT_CONST (~0UL) -#else -# define WMULT_CONST (1UL << 32) -#endif - +#define WMULT_CONST (~0U) #define WMULT_SHIFT 32 -/* - * Shift right and round: - */ -#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) +static void __update_inv_weight(struct load_weight *lw) +{ + unsigned long w; + + if (likely(lw->inv_weight)) + return; + + w = scale_load_down(lw->weight); + + if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) + lw->inv_weight = 1; + else if (unlikely(!w)) + lw->inv_weight = WMULT_CONST; + else + lw->inv_weight = WMULT_CONST / w; +} /* - * delta *= weight / lw + * delta_exec * weight / lw.weight + * OR + * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT + * + * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case + * we're guaranteed shift stays positive because inv_weight is guaranteed to + * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. + * + * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus + * weight/lw.weight <= 1, and therefore our shift will also be positive. */ -static unsigned long -calc_delta_mine(unsigned long delta_exec, unsigned long weight, - struct load_weight *lw) +static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) { - u64 tmp; - - /* - * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched - * entities since MIN_SHARES = 2. Treat weight as 1 if less than - * 2^SCHED_LOAD_RESOLUTION. 
- */ - if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) - tmp = (u64)delta_exec * scale_load_down(weight); - else - tmp = (u64)delta_exec; + u64 fact = scale_load_down(weight); + int shift = WMULT_SHIFT; - if (!lw->inv_weight) { - unsigned long w = scale_load_down(lw->weight); + __update_inv_weight(lw); - if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) - lw->inv_weight = 1; - else if (unlikely(!w)) - lw->inv_weight = WMULT_CONST; - else - lw->inv_weight = WMULT_CONST / w; + if (unlikely(fact >> 32)) { + while (fact >> 32) { + fact >>= 1; + shift--; + } } - /* - * Check whether we'd overflow the 64-bit multiplication: - */ - if (unlikely(tmp > WMULT_CONST)) - tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, - WMULT_SHIFT/2); - else - tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); + /* hint to use a 32x32->64 mul */ + fact = (u64)(u32)fact * lw->inv_weight; + + while (fact >> 32) { + fact >>= 1; + shift--; + } - return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); + return mul_u64_u32_shr(delta_exec, fact, shift); } @@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) #endif /* CONFIG_FAIR_GROUP_SCHED */ static __always_inline -void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); +void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); /************************************************************** * Scheduling class tree data structure manipulation methods: @@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write, /* * delta /= w */ -static inline unsigned long -calc_delta_fair(unsigned long delta, struct sched_entity *se) +static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) { if (unlikely(se->load.weight != NICE_0_LOAD)) - delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); + delta = __calc_delta(delta, NICE_0_LOAD, &se->load); return delta; } @@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) update_load_add(&lw, se->load.weight); load = &lw; } - slice = calc_delta_mine(slice, se->load.weight, load); + slice = __calc_delta(slice, se->load.weight, load); } return slice; } @@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p) #endif /* - * Update the current task's runtime statistics. Skip current tasks that - * are not in our scheduling class. + * Update the current task's runtime statistics. 
*/ -static inline void -__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, - unsigned long delta_exec) -{ - unsigned long delta_exec_weighted; - - schedstat_set(curr->statistics.exec_max, - max((u64)delta_exec, curr->statistics.exec_max)); - - curr->sum_exec_runtime += delta_exec; - schedstat_add(cfs_rq, exec_clock, delta_exec); - delta_exec_weighted = calc_delta_fair(delta_exec, curr); - - curr->vruntime += delta_exec_weighted; - update_min_vruntime(cfs_rq); -} - static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; u64 now = rq_clock_task(rq_of(cfs_rq)); - unsigned long delta_exec; + u64 delta_exec; if (unlikely(!curr)) return; - /* - * Get the amount of time the current task was running - * since the last time we changed load (this cannot - * overflow on 32 bits): - */ - delta_exec = (unsigned long)(now - curr->exec_start); - if (!delta_exec) + delta_exec = now - curr->exec_start; + if (unlikely((s64)delta_exec <= 0)) return; - __update_curr(cfs_rq, curr, delta_exec); curr->exec_start = now; + schedstat_set(curr->statistics.exec_max, + max(delta_exec, curr->statistics.exec_max)); + + curr->sum_exec_runtime += delta_exec; + schedstat_add(cfs_rq, exec_clock, delta_exec); + + curr->vruntime += calc_delta_fair(delta_exec, curr); + update_min_vruntime(cfs_rq); + if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); @@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work) (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) continue; + /* + * Skip inaccessible VMAs to avoid any confusion between + * PROT_NONE and NUMA hinting ptes + */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) + continue; + do { start = max(start, vma->vm_start); end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); @@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) } } -static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, - unsigned long delta_exec) +static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; @@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, } static __always_inline -void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) +void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) return; @@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) return rq_clock_task(rq_of(cfs_rq)); } -static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, - unsigned long delta_exec) {} +static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7d57275..1c40655 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * Change rq's cpupri only if rt_rq is the top queue. 
+ */ + if (&rq->rt != rt_rq) + return; +#endif if (rq->online && prio < prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } @@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * Change rq's cpupri only if rt_rq is the top queue. + */ + if (&rq->rt != rt_rq) + return; +#endif if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S index 4aef390..3e9868d47 100644 --- a/kernel/system_certificates.S +++ b/kernel/system_certificates.S @@ -3,8 +3,18 @@ __INITRODATA + .align 8 .globl VMLINUX_SYMBOL(system_certificate_list) VMLINUX_SYMBOL(system_certificate_list): +__cert_list_start: .incbin "kernel/x509_certificate_list" - .globl VMLINUX_SYMBOL(system_certificate_list_end) -VMLINUX_SYMBOL(system_certificate_list_end): +__cert_list_end: + + .align 8 + .globl VMLINUX_SYMBOL(system_certificate_list_size) +VMLINUX_SYMBOL(system_certificate_list_size): +#ifdef CONFIG_64BIT + .quad __cert_list_end - __cert_list_start +#else + .long __cert_list_end - __cert_list_start +#endif diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c index 564dd93..52ebc70 100644 --- a/kernel/system_keyring.c +++ b/kernel/system_keyring.c @@ -22,7 +22,7 @@ struct key *system_trusted_keyring; EXPORT_SYMBOL_GPL(system_trusted_keyring); extern __initconst const u8 system_certificate_list[]; -extern __initconst const u8 system_certificate_list_end[]; +extern __initconst const unsigned long system_certificate_list_size; /* * Load the compiled-in keys @@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void) pr_notice("Loading compiled-in X.509 certificates\n"); - end = system_certificate_list_end; p = system_certificate_list; + end = p + system_certificate_list_size; while (p < end) { /* Each cert begins with an ASN.1 SEQUENCE tag and must be more * than 256 bytes in size. 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e9f9ea..72a0f81 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -775,7 +775,7 @@ static int ftrace_profile_init(void) int cpu; int ret = 0; - for_each_online_cpu(cpu) { + for_each_possible_cpu(cpu) { ret = ftrace_profile_init_cpu(cpu); if (ret) break; diff --git a/kernel/user.c b/kernel/user.c index a3a0dbf..c006131 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -51,9 +51,9 @@ struct user_namespace init_user_ns = { .owner = GLOBAL_ROOT_UID, .group = GLOBAL_ROOT_GID, .proc_inum = PROC_USER_INIT_INO, -#ifdef CONFIG_KEYS_KERBEROS_CACHE - .krb_cache_register_sem = - __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), +#ifdef CONFIG_PERSISTENT_KEYRINGS + .persistent_keyring_register_sem = + __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem), #endif }; EXPORT_SYMBOL_GPL(init_user_ns); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c66912be..b010eac 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2851,19 +2851,6 @@ already_gone: return false; } -static bool __flush_work(struct work_struct *work) -{ - struct wq_barrier barr; - - if (start_flush_work(work, &barr)) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); - return true; - } else { - return false; - } -} - /** * flush_work - wait for a work to finish executing the last queueing instance * @work: the work to flush @@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work) */ bool flush_work(struct work_struct *work) { + struct wq_barrier barr; + lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - return __flush_work(work); + if (start_flush_work(work, &barr)) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + return true; + } else { + return false; + } } EXPORT_SYMBOL_GPL(flush_work); @@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); schedule_work_on(cpu, &wfc.work); - - /* - * The work item is on-stack and can't lead to deadlock through - * flushing. Use __flush_work() to avoid spurious lockdep warnings - * when work_on_cpu()s are nested. 
- */ - __flush_work(&wfc.work); - + flush_work(&wfc.work); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 17edeaf..1b6a44f 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -759,8 +759,8 @@ all_leaves_cluster_together: pr_devel("all leaves cluster together\n"); diff = INT_MAX; for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { - int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf), - assoc_array_ptr_to_leaf(node->slots[i])); + int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]), + index_key); if (x < diff) { BUG_ON(x < 0); diff = x; @@ -543,7 +543,7 @@ config ZSWAP config MEM_SOFT_DIRTY bool "Track memory changes" - depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY + depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS select PROC_PAGE_MONITOR help This option enables memory changes tracking by introducing a diff --git a/mm/compaction.c b/mm/compaction.c index 805165b..f58bcd0 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc, bool migrate_scanner) { struct zone *zone = cc->zone; + + if (cc->ignore_skip_hint) + return; + if (!page) return; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bccd5a6..7de1bf8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ret = 0; goto out_unlock; } + + /* mmap_sem prevents this happening but warn if that changes */ + WARN_ON(pmd_trans_migrating(pmd)); + if (unlikely(pmd_trans_splitting(pmd))) { /* split huge page running from under us */ spin_unlock(src_ptl); @@ -1243,6 +1247,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) return ERR_PTR(-EFAULT); + /* Full NUMA hinting faults to serialise migration in fault paths */ + if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) + goto out; + page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); if (flags & FOLL_TOUCH) { @@ -1295,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!pmd_same(pmd, *pmdp))) goto out_unlock; + /* + * If there are potential migrations, wait for completion and retry + * without disrupting NUMA hinting information. Do not relock and + * check_same as the page may no longer be mapped. + */ + if (unlikely(pmd_trans_migrating(*pmdp))) { + spin_unlock(ptl); + wait_migrate_huge_page(vma->anon_vma, pmdp); + goto out; + } + page = pmd_page(pmd); BUG_ON(is_huge_zero_page(page)); page_nid = page_to_nid(page); @@ -1323,23 +1342,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* If the page was locked, there are no parallel migrations */ if (page_locked) goto clear_pmdnuma; + } - /* - * Otherwise wait for potential migrations and retry. We do - * relock and check_same as the page may no longer be mapped. - * As the fault is being retried, do not account for it. - */ + /* Migration could have started since the pmd_trans_migrating check */ + if (!page_locked) { spin_unlock(ptl); wait_on_page_locked(page); page_nid = -1; goto out; } - /* Page is misplaced, serialise migrations and parallel THP splits */ + /* + * Page is misplaced. Page lock serialises migrations. 
Acquire anon_vma + * to serialises splits + */ get_page(page); spin_unlock(ptl); - if (!page_locked) - lock_page(page); anon_vma = page_lock_anon_vma_read(page); /* Confirm the PMD did not change while page_table_lock was released */ @@ -1351,6 +1369,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out_unlock; } + /* Bail if we fail to protect against THP splits for any reason */ + if (unlikely(!anon_vma)) { + put_page(page); + page_nid = -1; + goto clear_pmdnuma; + } + /* * Migrate the THP to the requested node, returns with page unlocked * and pmd_numa cleared. @@ -1481,8 +1506,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); VM_BUG_ON(!pmd_none(*new_pmd)); set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); - if (new_ptl != old_ptl) + if (new_ptl != old_ptl) { + pgtable_t pgtable; + + /* + * Move preallocated PTE page table if new_pmd is on + * different PMD page table. + */ + pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); + pgtable_trans_huge_deposit(mm, new_pmd, pgtable); + spin_unlock(new_ptl); + } spin_unlock(old_ptl); } out: @@ -1507,6 +1542,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ret = 1; if (!prot_numa) { entry = pmdp_get_and_clear(mm, addr, pmd); + if (pmd_numa(entry)) + entry = pmd_mknonnuma(entry); entry = pmd_modify(entry, newprot); ret = HPAGE_PMD_NR; BUG_ON(pmd_write(entry)); @@ -1521,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, */ if (!is_huge_zero_page(page) && !pmd_numa(*pmd)) { - entry = pmdp_get_and_clear(mm, addr, pmd); + entry = *pmd; entry = pmd_mknuma(entry); ret = HPAGE_PMD_NR; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f1a0ae6..bf5e894 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, goto bypass; if (unlikely(task_in_memcg_oom(current))) - goto bypass; + goto nomem; + + if (gfp_mask & __GFP_NOFAIL) + oom = false; /* * We always charge the cgroup the mm_struct belongs to. @@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) static void mem_cgroup_css_free(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); + /* + * XXX: css_offline() would be where we should reparent all + * memory to prepare the cgroup for destruction. However, + * memcg does not do css_tryget() and res_counter charging + * under the same RCU lock region, which means that charging + * could race with offlining. Offlining only happens to + * cgroups with no tasks in them but charges can show up + * without any tasks from the swapin path when the target + * memcg is looked up from the swapout record and not from the + * current task as it usually is. A race like this can leak + * charges and put pages with stale cgroup pointers into + * circulation: + * + * #0 #1 + * lookup_swap_cgroup_id() + * rcu_read_lock() + * mem_cgroup_lookup() + * css_tryget() + * rcu_read_unlock() + * disable css_tryget() + * call_rcu() + * offline_css() + * reparent_charges() + * res_counter_charge() + * css_put() + * css_free() + * pc->mem_cgroup = dead memcg + * add page to lru + * + * The bulk of the charges are still moved in offline_css() to + * avoid pinning a lot of pages in case a long-term reference + * like a swapout record is deferring the css_free() to long + * after offlining. 
But this makes sure we catch any charges + * made after offlining: + */ + mem_cgroup_reparent_charges(memcg); memcg_destroy_kmem(memcg); __mem_cgroup_free(memcg); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7c1716..db08af9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags) if (ret > 0) ret = -EIO; } else { - set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + /* overcommit hugetlb page will be freed to buddy */ + if (PageHuge(page)) { + set_page_hwpoison_huge_page(hpage); + dequeue_hwpoisoned_huge_page(hpage); + atomic_long_add(1 << compound_order(hpage), + &num_poisoned_pages); + } else { + SetPageHWPoison(page); + atomic_long_inc(&num_poisoned_pages); + } } return ret; } diff --git a/mm/memory.c b/mm/memory.c index 5d9025f..6768ce9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS +#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS bool ptlock_alloc(struct page *page) { spinlock_t *ptl; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eca4a31..0cd2c4d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * break; vma = vma->vm_next; } + + if (PageHuge(page)) { + if (vma) + return alloc_huge_page_noerr(vma, address, 1); + else + return NULL; + } /* - * queue_pages_range() confirms that @page belongs to some vma, - * so vma shouldn't be NULL. + * if !vma, alloc_page_vma() will use task or system default policy */ - BUG_ON(!vma); - - if (PageHuge(page)) - return alloc_huge_page_noerr(vma, address, 1); return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); } #else @@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } else - putback_lru_pages(&pagelist); + putback_movable_pages(&pagelist); up_write(&mm->mmap_sem); mpol_out: diff --git a/mm/migrate.c b/mm/migrate.c index bb94004..9194375 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -36,6 +36,7 @@ #include <linux/hugetlb_cgroup.h> #include <linux/gfp.h> #include <linux/balloon_compaction.h> +#include <linux/mmu_notifier.h> #include <asm/tlbflush.h> @@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, */ int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode) + struct buffer_head *head, enum migrate_mode mode, + int extra_count) { - int expected_count = 0; + int expected_count = 1 + extra_count; void **pslot; if (!mapping) { /* Anonymous page without mapping */ - if (page_count(page) != 1) + if (page_count(page) != expected_count) return -EAGAIN; return MIGRATEPAGE_SUCCESS; } @@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); - expected_count = 2 + page_has_private(page); + expected_count += 1 + page_has_private(page); if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); @@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping, 
BUG_ON(PageWriteback(page)); /* Writeback must be complete */ - rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); if (rc != MIGRATEPAGE_SUCCESS) return rc; @@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping, head = page_buffers(page); - rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); + rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); if (rc != MIGRATEPAGE_SUCCESS) return rc; @@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) return 1; } +bool pmd_trans_migrating(pmd_t pmd) +{ + struct page *page = pmd_page(pmd); + return PageLocked(page); +} + +void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ + struct page *page = pmd_page(*pmd); + wait_on_page_locked(page); +} + /* * Attempt to migrate a misplaced page to the specified destination * node. Caller is expected to have an elevated reference count on @@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, struct page *page, int node) { spinlock_t *ptl; - unsigned long haddr = address & HPAGE_PMD_MASK; pg_data_t *pgdat = NODE_DATA(node); int isolated = 0; struct page *new_page = NULL; struct mem_cgroup *memcg = NULL; int page_lru = page_is_file_cache(page); + unsigned long mmun_start = address & HPAGE_PMD_MASK; + unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; + pmd_t orig_entry; /* * Rate-limit the amount of data that is being migrated to a node. @@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, goto out_fail; } + if (mm_tlb_flush_pending(mm)) + flush_tlb_range(vma, mmun_start, mmun_end); + /* Prepare a page as a migration target */ __set_page_locked(new_page); SetPageSwapBacked(new_page); @@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, WARN_ON(PageLRU(new_page)); /* Recheck the target PMD */ + mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_same(*pmd, entry))) { + if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { +fail_putback: spin_unlock(ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); /* Reverse changes made by migrate_page_copy() */ if (TestClearPageActive(new_page)) @@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, putback_lru_page(page); mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); - goto out_fail; + + goto out_unlock; } /* @@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, */ mem_cgroup_prepare_migration(page, new_page, &memcg); + orig_entry = *pmd; entry = mk_pmd(new_page, vma->vm_page_prot); - entry = pmd_mknonnuma(entry); - entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - pmdp_clear_flush(vma, haddr, pmd); - set_pmd_at(mm, haddr, pmd, entry); - page_add_new_anon_rmap(new_page, vma, haddr); + /* + * Clear the old entry under pagetable lock and establish the new PTE. + * Any parallel GUP will either observe the old page blocking on the + * page lock, block on the page table lock or observe the new page. + * The SetPageUptodate on the new page and page_add_new_anon_rmap + * guarantee the copy is visible before the pagetable update. 
+ */ + flush_cache_range(vma, mmun_start, mmun_end); + page_add_new_anon_rmap(new_page, vma, mmun_start); + pmdp_clear_flush(vma, mmun_start, pmd); + set_pmd_at(mm, mmun_start, pmd, entry); + flush_tlb_range(vma, mmun_start, mmun_end); update_mmu_cache_pmd(vma, address, &entry); + + if (page_count(page) != 2) { + set_pmd_at(mm, mmun_start, pmd, orig_entry); + flush_tlb_range(vma, mmun_start, mmun_end); + update_mmu_cache_pmd(vma, address, &entry); + page_remove_rmap(new_page); + goto fail_putback; + } + page_remove_rmap(page); + /* * Finish the charge transaction under the page table lock to * prevent split_huge_page() from dividing up the charge @@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, */ mem_cgroup_end_migration(memcg, page, new_page, true); spin_unlock(ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); unlock_page(new_page); unlock_page(page); @@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, out_fail: count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); out_dropref: - entry = pmd_mknonnuma(entry); - set_pmd_at(mm, haddr, pmd, entry); - update_mmu_cache_pmd(vma, address, &entry); + ptl = pmd_lock(mm, pmd); + if (pmd_same(*pmd, entry)) { + entry = pmd_mknonnuma(entry); + set_pmd_at(mm, mmun_start, pmd, entry); + update_mmu_cache_pmd(vma, address, &entry); + } + spin_unlock(ptl); +out_unlock: unlock_page(page); put_page(page); return 0; diff --git a/mm/mprotect.c b/mm/mprotect.c index 2666797..bb53a65 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, pte_t ptent; bool updated = false; - ptent = ptep_modify_prot_start(mm, addr, pte); if (!prot_numa) { + ptent = ptep_modify_prot_start(mm, addr, pte); + if (pte_numa(ptent)) + ptent = pte_mknonnuma(ptent); ptent = pte_modify(ptent, newprot); updated = true; } else { struct page *page; + ptent = *pte; page = vm_normal_page(vma, addr, oldpte); if (page) { if (!pte_numa(oldpte)) { ptent = pte_mknuma(ptent); + set_pte_at(mm, addr, pte, ptent); updated = true; } } @@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (updated) pages++; - ptep_modify_prot_commit(mm, addr, pte, ptent); + + /* Only !prot_numa always clears the pte */ + if (!prot_numa) + ptep_modify_prot_commit(mm, addr, pte, ptent); } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); @@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); + set_tlb_flush_pending(mm); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) @@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, /* Only flush the TLB if we actually modified any entries: */ if (pages) flush_tlb_range(vma, start, end); + clear_tlb_flush_pending(mm); return pages; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 580a5f0..5248fe0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist) static bool zone_local(struct zone *local_zone, struct zone *zone) { - return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; + return local_zone->node == zone->node; } static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) @@ -1913,18 +1913,17 @@ zonelist_scan: * page was 
allocated in should have no effect on the * time the page has in memory before being reclaimed. * - * When zone_reclaim_mode is enabled, try to stay in - * local zones in the fastpath. If that fails, the - * slowpath is entered, which will do another pass - * starting with the local zones, but ultimately fall - * back to remote zones that do not partake in the - * fairness round-robin cycle of this zonelist. + * Try to stay in local zones in the fastpath. If + * that fails, the slowpath is entered, which will do + * another pass starting with the local zones, but + * ultimately fall back to remote zones that do not + * partake in the fairness round-robin cycle of this + * zonelist. */ if (alloc_flags & ALLOC_WMARK_LOW) { if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) continue; - if (zone_reclaim_mode && - !zone_local(preferred_zone, zone)) + if (!zone_local(preferred_zone, zone)) continue; } /* @@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order, * thrash fairness information for zones that are not * actually part of this zonelist's round-robin cycle. */ - if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) + if (!zone_local(preferred_zone, zone)) continue; mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index cbb3854..a8b9199 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma, pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + struct mm_struct *mm = (vma)->vm_mm; pte_t pte; - pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); - if (pte_accessible(pte)) + pte = ptep_get_and_clear(mm, address, ptep); + if (pte_accessible(mm, pte)) flush_tlb_page(vma, address); return pte; } @@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { + pmd_t entry = *pmdp; + if (pmd_numa(entry)) + entry = pmd_mknonnuma(entry); set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } @@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm, spinlock_t *ptl; if (unlikely(PageHuge(page))) { + /* when pud is not present, pte will be NULL */ pte = huge_pte_offset(mm, address); + if (!pte) + return NULL; + ptl = huge_pte_lockptr(page_hstate(page), mm, pte); goto check; } @@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = { .d_dname = simple_dname }; -/** - * shmem_file_setup - get an unlinked file living in tmpfs - * @name: name for dentry (to be seen in /proc/<pid>/maps - * @size: size to be set for the file - * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size - */ -struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) +static struct file *__shmem_file_setup(const char *name, loff_t size, + unsigned long flags, unsigned int i_flags) { struct file *res; struct inode *inode; @@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags if (!inode) goto put_dentry; + inode->i_flags |= i_flags; d_instantiate(path.dentry, inode); inode->i_size = size; clear_nlink(inode); /* It is unlinked */ @@ -2977,6 +2973,32 @@ put_memory: shmem_unacct_size(flags, size); return res; } + +/** + * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must 
be + * kernel internal. There will be NO LSM permission checks against the + * underlying inode. So users of this interface must do LSM checks at a + * higher layer. The one user is the big_key implementation. LSM checks + * are provided at the key level rather than the inode level. + * @name: name for dentry (to be seen in /proc/<pid>/maps + * @size: size to be set for the file + * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size + */ +struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) +{ + return __shmem_file_setup(name, size, flags, S_PRIVATE); +} + +/** + * shmem_file_setup - get an unlinked file living in tmpfs + * @name: name for dentry (to be seen in /proc/<pid>/maps + * @size: size to be set for the file + * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size + */ +struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) +{ + return __shmem_file_setup(name, size, flags, 0); +} EXPORT_SYMBOL_GPL(shmem_file_setup); /** diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 229d820..045d56e 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br, int br_handle_frame_finish(struct sk_buff *skb); rx_handler_result_t br_handle_frame(struct sk_buff **pskb); +static inline bool br_rx_handler_check_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->rx_handler) == br_handle_frame; +} + +static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev) +{ + return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL; +} + /* br_ioctl.c */ int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 8660ea3..bdb459d 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) goto err; - p = br_port_get_rcu(dev); + p = br_port_get_check_rcu(dev); if (!p) goto err; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 9589718..e70301e 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = { .hdrsize = 0, .name = "NET_DM", .version = 2, - .maxattr = NET_DM_CMD_MAX, }; static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca15f32..36b1443 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->parms->reachable_time : 0))); neigh->nud_state = new; + notify = 1; } if (lladdr != neigh->ha) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2718fed..06e72d3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; + skb->local_df = 0; skb_dst_drop(skb); skb->mark = 0; secpath_reset(skb); diff --git a/net/core/sock.c b/net/core/sock.c index ab20ed9..5393b4b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -882,7 +882,7 @@ set_rcvbuf: case SO_PEEK_OFF: if (sock->ops->set_peek_off) - sock->ops->set_peek_off(sk, val); + ret = 
sock->ops->set_peek_off(sk, val); else ret = -EOPNOTSUPP; break; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4ac71ff..2b90a78 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -851,7 +851,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 523be38..f2e1573 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -104,7 +104,10 @@ errout: static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) { struct fib_result *result = (struct fib_result *) arg->result; - struct net_device *dev = result->fi->fib_dev; + struct net_device *dev = NULL; + + if (result->fi) + dev = result->fi->fib_dev; /* do not accept result if the route does * not meet the required prefix length diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f13bd91..a313c3f 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) static struct xt_target synproxy_tg4_reg __read_mostly = { .name = "SYNPROXY", .family = NFPROTO_IPV4, + .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg4, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg4_check, diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index fff5ba1..4a5e94a 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_reject *priv = nft_expr_priv(expr); - if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) goto nla_put_failure; switch (priv->type) { diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 269a89e..f7e522c 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -6,13 +6,6 @@ #include <linux/memcontrol.h> #include <linux/module.h> -static void memcg_tcp_enter_memory_pressure(struct sock *sk) -{ - if (sk->sk_cgrp->memory_pressure) - sk->sk_cgrp->memory_pressure = 1; -} -EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); - int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { /* diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 44f6a20..f140048 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { - struct sock *sk; const struct iphdr *iph = ip_hdr(skb); - if (unlikely(sk = skb_steal_sock(skb))) - return sk; - else - return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, - iph->daddr, dport, inet_iif(skb), - udptable); + return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, + iph->daddr, dport, inet_iif(skb), + udptable); } struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, @@ -1603,12 +1599,16 @@ static void flush_stack(struct sock **stack, unsigned int count, kfree_skb(skb1); } -static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +/* For TCP sockets, sk_rx_dst is protected by socket lock + * For UDP, we use xchg() to guard against concurrent changes. 
+ */ +static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) { - struct dst_entry *dst = skb_dst(skb); + struct dst_entry *old; dst_hold(dst); - sk->sk_rx_dst = dst; + old = xchg(&sk->sk_rx_dst, dst); + dst_release(old); } /* @@ -1739,15 +1739,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp4_csum_init(skb, uh, proto)) goto csum_error; - if (skb->sk) { + sk = skb_steal_sock(skb); + if (sk) { + struct dst_entry *dst = skb_dst(skb); int ret; - sk = skb->sk; - if (unlikely(sk->sk_rx_dst == NULL)) - udp_sk_rx_dst_set(sk, skb); + if (unlikely(sk->sk_rx_dst != dst)) + udp_sk_rx_dst_set(sk, dst); ret = udp_queue_rcv_skb(sk, skb); - + sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ @@ -1913,17 +1914,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, void udp_v4_early_demux(struct sk_buff *skb) { - const struct iphdr *iph = ip_hdr(skb); - const struct udphdr *uh = udp_hdr(skb); + struct net *net = dev_net(skb->dev); + const struct iphdr *iph; + const struct udphdr *uh; struct sock *sk; struct dst_entry *dst; - struct net *net = dev_net(skb->dev); int dif = skb->dev->ifindex; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) return; + iph = ip_hdr(skb); + uh = udp_hdr(skb); + if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 12c97d8..d5fa5b8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2613,7 +2613,7 @@ static void init_loopback(struct net_device *dev) if (sp_ifa->rt) continue; - sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false); /* Failure cases are ignored */ if (!IS_ERR(sp_rt)) { diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8dfe1f4..93b1aa3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; } } diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index e275916..3fd0a57 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -122,7 +122,11 @@ out: static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) { struct rt6_info *rt = (struct rt6_info *) arg->result; - struct net_device *dev = rt->rt6i_idev->dev; + struct net_device *dev = NULL; + + if (rt->rt6i_idev) + dev = rt->rt6i_idev->dev; + /* do not accept result if the route does * not meet the required prefix length */ diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3512177..3008651 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1277,6 +1277,9 @@ skip_linkparms: ri->prefix_len == 0) continue; #endif + if (ri->prefix_len == 0 && + !in6_dev->cnf.accept_ra_defrtr) + continue; if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index f78f41a..a0d1727 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) static struct xt_target synproxy_tg6_reg __read_mostly = { .name = 
"SYNPROXY", .family = NFPROTO_IPV6, + .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg6, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg6_check, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 7fb4e14..b6bb87e 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -792,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - daddr = &flowlabel->dst; } } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7faa9d5..a0a48ac 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops); static int ip6_pkt_discard(struct sk_buff *skb); static int ip6_pkt_discard_out(struct sk_buff *skb); +static int ip6_pkt_prohibit(struct sk_buff *skb); +static int ip6_pkt_prohibit_out(struct sk_buff *skb); static void ip6_link_failure(struct sk_buff *skb); static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu); @@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = { #ifdef CONFIG_IPV6_MULTIPLE_TABLES -static int ip6_pkt_prohibit(struct sk_buff *skb); -static int ip6_pkt_prohibit_out(struct sk_buff *skb); - static const struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), @@ -1565,21 +1564,24 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } } - rt->dst.output = ip6_pkt_discard_out; - rt->dst.input = ip6_pkt_discard; rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; switch (cfg->fc_type) { case RTN_BLACKHOLE: rt->dst.error = -EINVAL; + rt->dst.output = dst_discard; + rt->dst.input = dst_discard; break; case RTN_PROHIBIT: rt->dst.error = -EACCES; + rt->dst.output = ip6_pkt_prohibit_out; + rt->dst.input = ip6_pkt_prohibit; break; case RTN_THROW: - rt->dst.error = -EAGAIN; - break; default: - rt->dst.error = -ENETUNREACH; + rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN + : -ENETUNREACH; + rt->dst.output = ip6_pkt_discard_out; + rt->dst.input = ip6_pkt_discard; break; } goto install_route; @@ -2144,8 +2146,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb) return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); } -#ifdef CONFIG_IPV6_MULTIPLE_TABLES - static int ip6_pkt_prohibit(struct sk_buff *skb) { return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); @@ -2157,8 +2157,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb) return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); } -#endif - /* * Allocate a dst for local (unicast / anycast) address. 
*/ @@ -2168,12 +2166,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, bool anycast) { struct net *net = dev_net(idev->dev); - struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); - - if (!rt) { - net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n"); + struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, + DST_NOCOUNT, NULL); + if (!rt) return ERR_PTR(-ENOMEM); - } in6_dev_hold(idev); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0740f93..f67033b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index bcd5699..089c741 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1140,7 +1140,6 @@ do_udp_sendmsg: flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - daddr = &flowlabel->dst; } } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index d9b437e..bb6e206 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - daddr = &flowlabel->dst; } } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 95667b0..364ce0c 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1368,7 +1368,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, changed |= ieee80211_mps_set_sta_local_pm(sta, params->local_pm); - ieee80211_bss_info_change_notify(sdata, changed); + ieee80211_mbss_info_change_notify(sdata, changed); #endif } @@ -2488,8 +2488,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT) + if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) @@ -3120,9 +3119,17 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, params->chandef.chan->band) return -EINVAL; + ifmsh->chsw_init = true; + if (!ifmsh->pre_value) + ifmsh->pre_value = 1; + else + ifmsh->pre_value++; + err = ieee80211_mesh_csa_beacon(sdata, params, true); - if (err < 0) + if (err < 0) { + ifmsh->chsw_init = false; return err; + } break; #endif default: diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 531be04..27a39de 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -823,6 +823,10 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, if (err) return false; + /* channel switch is not supported, disconnect */ + if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) + goto disconnect; + params.count = csa_ie.count; params.chandef = csa_ie.chandef; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 29dc505..4aea4e7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1228,6 +1228,7 @@ struct ieee80211_csa_ie { u8 mode; u8 count; u8 ttl; + u16 pre_value; }; /* Parsed Information Elements */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index ff101ea..36c3a4c 
100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1325,7 +1325,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.bssid = NULL; break; case NL80211_IFTYPE_AP_VLAN: - break; case NL80211_IFTYPE_P2P_DEVICE: sdata->vif.bss_conf.bssid = sdata->vif.addr; break; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 21d5d44..7d1c3ac 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -940,6 +940,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", result); + local->hw.conf.flags = IEEE80211_CONF_IDLE; + ieee80211_led_init(local); rtnl_lock(); @@ -1047,6 +1049,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) cancel_work_sync(&local->restart_work); cancel_work_sync(&local->reconfig_filter); + flush_work(&local->sched_scan_stopped_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 896fe3b..ba10525 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -943,14 +943,19 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, params.chandef.chan->center_freq); params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; - if (beacon) + if (beacon) { ifmsh->chsw_ttl = csa_ie.ttl - 1; - else - ifmsh->chsw_ttl = 0; + if (ifmsh->pre_value >= csa_ie.pre_value) + return false; + ifmsh->pre_value = csa_ie.pre_value; + } - if (ifmsh->chsw_ttl > 0) + if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) { if (ieee80211_mesh_csa_beacon(sdata, ¶ms, false) < 0) return false; + } else { + return false; + } sdata->csa_radar_required = params.radar_required; @@ -1163,7 +1168,6 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, offset_ttl = (len < 42) ? 
7 : 10; *(pos + offset_ttl) -= 1; *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; - sdata->u.mesh.chsw_ttl = *(pos + offset_ttl); memcpy(mgmt_fwd, mgmt, len); eth_broadcast_addr(mgmt_fwd->da); @@ -1182,7 +1186,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, u16 pre_value; bool fwd_csa = true; size_t baselen; - u8 *pos, ttl; + u8 *pos; if (mgmt->u.action.u.measurement.action_code != WLAN_ACTION_SPCT_CHL_SWITCH) @@ -1193,8 +1197,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, u.action.u.chan_switch.variable); ieee802_11_parse_elems(pos, len - baselen, false, &elems); - ttl = elems.mesh_chansw_params_ie->mesh_ttl; - if (!--ttl) + ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; + if (!--ifmsh->chsw_ttl) fwd_csa = false; pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d7504ab..b3a3ce3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1910,6 +1910,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) already = true; + ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; + mutex_unlock(&sdata->local->mtx); if (already) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 5d60779..4096ff6 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -226,7 +226,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); nsecs += minstrel_mcs_groups[group].duration[rate]; - tp = 1000000 * ((mr->probability * 1000) / nsecs); + tp = 1000000 * ((prob * 1000) / nsecs); mr->cur_tp = MINSTREL_TRUNC(tp); } @@ -277,13 +277,15 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) if (!(mg->supported & BIT(i))) continue; + index = MCS_GROUP_RATES * group + i; + /* initialize rates selections starting indexes */ if (!mg_rates_valid) { mg->max_tp_rate = mg->max_tp_rate2 = mg->max_prob_rate = i; if (!mi_rates_valid) { mi->max_tp_rate = mi->max_tp_rate2 = - mi->max_prob_rate = i; + mi->max_prob_rate = index; mi_rates_valid = true; } mg_rates_valid = true; @@ -291,7 +293,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mr = &mg->rates[i]; mr->retry_updated = false; - index = MCS_GROUP_RATES * group + i; minstrel_calc_rate_ewma(mr); minstrel_ht_calc_tp(mi, group, i); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index caecef8..2b0debb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, u16 sc; u8 tid, ack_policy; - if (!ieee80211_is_data_qos(hdr->frame_control)) + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) goto dont_reorder; /* diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 5ad66a8..bcc4833 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -1088,6 +1088,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) trace_api_sched_scan_stopped(local); - ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work); + schedule_work(&local->sched_scan_stopped_work); } EXPORT_SYMBOL(ieee80211_sched_scan_stopped); diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index a40da20..6ab0090 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -78,6 +78,8 @@ int 
ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, if (elems->mesh_chansw_params_ie) { csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; + csa_ie->pre_value = le16_to_cpu( + elems->mesh_chansw_params_ie->mesh_pre_value); } new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 592a181..9f9b9bd 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2278,17 +2278,15 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, radar_detected_work); - struct cfg80211_chan_def chandef; + struct cfg80211_chan_def chandef = local->hw.conf.chandef; ieee80211_dfs_cac_cancel(local); if (local->use_chanctx) /* currently not handled */ WARN_ON(1); - else { - chandef = local->hw.conf.chandef; + else cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); - } } void ieee80211_radar_detected(struct ieee80211_hw *hw) @@ -2459,14 +2457,9 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ pos += 2; - if (!ifmsh->pre_value) - ifmsh->pre_value = 1; - else - ifmsh->pre_value++; pre_value = cpu_to_le16(ifmsh->pre_value); memcpy(pos, &pre_value, 2); /* Precedence Value */ pos += 2; - ifmsh->chsw_init = true; } ieee80211_tx_skb(sdata, skb); diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 2bc2dec..6226803 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1, u32 *multi) { return ip1->ipcmp == ip2->ipcmp && - ip2->ccmp == ip2->ccmp; + ip1->ccmp == ip2->ccmp; } static inline int diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index dcddc49..f93b7d0 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1717,6 +1717,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule) return -ENOENT; } +static int nf_table_delrule_by_chain(struct nft_ctx *ctx) +{ + struct nft_rule *rule; + int err; + + list_for_each_entry(rule, &ctx->chain->rules, list) { + err = nf_tables_delrule_one(ctx, rule); + if (err < 0) + return err; + } + return 0; +} + static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -1725,8 +1738,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, const struct nft_af_info *afi; struct net *net = sock_net(skb->sk); const struct nft_table *table; - struct nft_chain *chain; - struct nft_rule *rule, *tmp; + struct nft_chain *chain = NULL; + struct nft_rule *rule; int family = nfmsg->nfgen_family, err = 0; struct nft_ctx ctx; @@ -1738,22 +1751,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, if (IS_ERR(table)) return PTR_ERR(table); - chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); - if (IS_ERR(chain)) - return PTR_ERR(chain); + if (nla[NFTA_RULE_CHAIN]) { + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + if (IS_ERR(chain)) + return PTR_ERR(chain); + } nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); - if (nla[NFTA_RULE_HANDLE]) { - rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); - if (IS_ERR(rule)) - return PTR_ERR(rule); + if 
(chain) { + if (nla[NFTA_RULE_HANDLE]) { + rule = nf_tables_rule_lookup(chain, + nla[NFTA_RULE_HANDLE]); + if (IS_ERR(rule)) + return PTR_ERR(rule); - err = nf_tables_delrule_one(&ctx, rule); - } else { - /* Remove all rules in this chain */ - list_for_each_entry_safe(rule, tmp, &chain->rules, list) { err = nf_tables_delrule_one(&ctx, rule); + } else { + err = nf_table_delrule_by_chain(&ctx); + } + } else { + list_for_each_entry(chain, &table->chains, list) { + ctx.chain = chain; + err = nf_table_delrule_by_chain(&ctx); if (err < 0) break; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9ff035c..a3910fc 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong) add_timer(&ht->timer); } -static void htable_destroy(struct xt_hashlimit_htable *hinfo) +static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); struct proc_dir_entry *parent; - del_timer_sync(&hinfo->timer); - if (hinfo->family == NFPROTO_IPV4) parent = hashlimit_net->ipt_hashlimit; else parent = hashlimit_net->ip6t_hashlimit; - if(parent != NULL) + if (parent != NULL) remove_proc_entry(hinfo->name, parent); +} +static void htable_destroy(struct xt_hashlimit_htable *hinfo) +{ + del_timer_sync(&hinfo->timer); + htable_remove_proc_entry(hinfo); htable_selective_cleanup(hinfo, select_all); kfree(hinfo->name); vfree(hinfo); @@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net) static void __net_exit hashlimit_proc_net_exit(struct net *net) { struct xt_hashlimit_htable *hinfo; - struct proc_dir_entry *pde; struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); - /* recent_net_exit() is called before recent_mt_destroy(). Make sure - * that the parent xt_recent proc entry is is empty before trying to - * remove it. + /* hashlimit_net_exit() is called before hashlimit_mt_destroy(). + * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc + * entries is empty before trying to remove it. */ mutex_lock(&hashlimit_mutex); - pde = hashlimit_net->ipt_hashlimit; - if (pde == NULL) - pde = hashlimit_net->ip6t_hashlimit; - hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) - remove_proc_entry(hinfo->name, pde); - + htable_remove_proc_entry(hinfo); hashlimit_net->ipt_hashlimit = NULL; hashlimit_net->ip6t_hashlimit = NULL; mutex_unlock(&hashlimit_mutex); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ba2548b..88cfbc1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -237,6 +237,30 @@ struct packet_skb_cb { static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); +static struct net_device *packet_cached_dev_get(struct packet_sock *po) +{ + struct net_device *dev; + + rcu_read_lock(); + dev = rcu_dereference(po->cached_dev); + if (likely(dev)) + dev_hold(dev); + rcu_read_unlock(); + + return dev; +} + +static void packet_cached_dev_assign(struct packet_sock *po, + struct net_device *dev) +{ + rcu_assign_pointer(po->cached_dev, dev); +} + +static void packet_cached_dev_reset(struct packet_sock *po) +{ + RCU_INIT_POINTER(po->cached_dev, NULL); +} + /* register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). 
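The af_packet hunks above add packet_cached_dev_get(), packet_cached_dev_assign() and packet_cached_dev_reset(), which follow the usual RCU pointer idiom: publish with rcu_assign_pointer(), clear with RCU_INIT_POINTER(), and read under rcu_read_lock() while pinning the device with dev_hold(). A minimal caller-side sketch of how such an accessor is consumed on a send path (the -ENXIO error value and the dev_queue_xmit() step are illustrative assumptions, not a quote of the actual tpacket_snd()/packet_snd() code):

        struct net_device *dev;

        dev = packet_cached_dev_get(po);        /* reference taken under RCU */
        if (unlikely(dev == NULL))
                return -ENXIO;                  /* no device cached yet */

        /* ... build the frame and hand it to dev_queue_xmit() ... */

        dev_put(dev);                           /* drop the reference from _get() */

In the hunks above the cached pointer is assigned in packet_do_bind(), and cleared in packet_create(), packet_release() and on NETDEV_UNREGISTER, so readers only ever see NULL or a device that is still registered.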
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk) struct packet_sock *po = pkt_sk(sk); if (!po->running) { - if (po->fanout) { + if (po->fanout) __fanout_link(sk, po); - } else { + else dev_add_pack(&po->prot_hook); - rcu_assign_pointer(po->cached_dev, po->prot_hook.dev); - } sock_hold(sk); po->running = 1; @@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync) struct packet_sock *po = pkt_sk(sk); po->running = 0; - if (po->fanout) { + + if (po->fanout) __fanout_unlink(sk, po); - } else { + else __dev_remove_pack(&po->prot_hook); - RCU_INIT_POINTER(po->cached_dev, NULL); - } __sock_put(sk); @@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, return tp_len; } -static struct net_device *packet_cached_dev_get(struct packet_sock *po) -{ - struct net_device *dev; - - rcu_read_lock(); - dev = rcu_dereference(po->cached_dev); - if (dev) - dev_hold(dev); - rcu_read_unlock(); - - return dev; -} - static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; @@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) mutex_lock(&po->pg_vec_lock); - if (saddr == NULL) { + if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; @@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock, * Get and verify the address. */ - if (saddr == NULL) { + if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; @@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock) spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); + packet_cached_dev_reset(po); + if (po->prot_hook.dev) { dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; @@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc spin_lock(&po->bind_lock); unregister_prot_hook(sk, true); + po->num = protocol; po->prot_hook.type = protocol; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); - po->prot_hook.dev = dev; + po->prot_hook.dev = dev; po->ifindex = dev ? 
dev->ifindex : 0; + packet_cached_dev_assign(po, dev); + if (protocol == 0) goto out_unlock; @@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; - RCU_INIT_POINTER(po->cached_dev, NULL); + + packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); @@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this, sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { + packet_cached_dev_reset(po); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index e590949..37be6e2 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); scat = &rm->data.op_sg[sg]; - ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; - ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); - return ret; + ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length); + return sizeof(struct rds_header) + ret; } /* FIXME we may overallocate here */ diff --git a/net/sched/act_api.c b/net/sched/act_api.c index fd70728..69cb848 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; + /* Must supply act, dump, cleanup and init */ + if (!act->act || !act->dump || !act->cleanup || !act->init) + return -EINVAL; + + /* Supply defaults */ + if (!act->lookup) + act->lookup = tcf_hash_search; + if (!act->walk) + act->walk = tcf_generic_walker; + write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { @@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act, } while ((a = act) != NULL) { repeat: - if (a->ops && a->ops->act) { + if (a->ops) { ret = a->ops->act(skb, a, res); if (TC_MUNGED & skb->tc_verd) { /* copied already, allow trampling */ @@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind) struct tc_action *a; for (a = act; a; a = act) { - if (a->ops && a->ops->cleanup) { + if (a->ops) { if (a->ops->cleanup(a, bind) == ACT_P_DELETED) module_put(a->ops->owner); act = act->next; @@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; - if (a->ops == NULL || a->ops->dump == NULL) + if (a->ops == NULL) return err; return a->ops->dump(skb, a, bind, ref); } @@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; - if (a->ops == NULL || a->ops->dump == NULL) + if (a->ops == NULL) return err; if (nla_put_string(skb, TCA_KIND, a->ops->kind)) @@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid) a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); if (a->ops == NULL) goto err_free; - if (a->ops->lookup == NULL) - goto err_mod; err = -ENOENT; if (a->ops->lookup(a, index) == 0) goto err_mod; @@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) memset(&a, 0, sizeof(struct tc_action)); a.ops = a_o; - if (a_o->walk == NULL) { - WARN(1, "tc_dump_action: %s !capable of dumping table\n", - a_o->kind); - goto out_module_put; - } - nlh = nlmsg_put(skb, 
NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t), 0); if (!nlh) diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 3a4c0ca..5c5edf5 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = { .act = tcf_csum, .dump = tcf_csum_dump, .cleanup = tcf_csum_cleanup, - .lookup = tcf_hash_search, .init = tcf_csum_init, - .walk = tcf_generic_walker }; MODULE_DESCRIPTION("Checksum updating actions"); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index fd2b3cf..5645a4d 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -206,9 +206,7 @@ static struct tc_action_ops act_gact_ops = { .act = tcf_gact, .dump = tcf_gact_dump, .cleanup = tcf_gact_cleanup, - .lookup = tcf_hash_search, .init = tcf_gact_init, - .walk = tcf_generic_walker }; MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60d88b6..882a897 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -298,9 +298,7 @@ static struct tc_action_ops act_ipt_ops = { .act = tcf_ipt, .dump = tcf_ipt_dump, .cleanup = tcf_ipt_cleanup, - .lookup = tcf_hash_search, .init = tcf_ipt_init, - .walk = tcf_generic_walker }; static struct tc_action_ops act_xt_ops = { @@ -312,9 +310,7 @@ static struct tc_action_ops act_xt_ops = { .act = tcf_ipt, .dump = tcf_ipt_dump, .cleanup = tcf_ipt_cleanup, - .lookup = tcf_hash_search, .init = tcf_ipt_init, - .walk = tcf_generic_walker }; MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 977c10e..2523781 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = { .act = tcf_mirred, .dump = tcf_mirred_dump, .cleanup = tcf_mirred_cleanup, - .lookup = tcf_hash_search, .init = tcf_mirred_init, - .walk = tcf_generic_walker }; MODULE_AUTHOR("Jamal Hadi Salim(2002)"); diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 876f0ef..6a15ace 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = { .act = tcf_nat, .dump = tcf_nat_dump, .cleanup = tcf_nat_cleanup, - .lookup = tcf_hash_search, .init = tcf_nat_init, - .walk = tcf_generic_walker }; MODULE_DESCRIPTION("Stateless NAT actions"); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 7ed78c9..03b6767 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -243,9 +243,7 @@ static struct tc_action_ops act_pedit_ops = { .act = tcf_pedit, .dump = tcf_pedit_dump, .cleanup = tcf_pedit_cleanup, - .lookup = tcf_hash_search, .init = tcf_pedit_init, - .walk = tcf_generic_walker }; MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 272d8e9..16a62c3 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -407,7 +407,6 @@ static struct tc_action_ops act_police_ops = { .act = tcf_act_police, .dump = tcf_act_police_dump, .cleanup = tcf_act_police_cleanup, - .lookup = tcf_hash_search, .init = tcf_act_police_locate, .walk = tcf_act_police_walker }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 7725eb4..31157d3 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -201,7 +201,6 @@ static struct tc_action_ops act_simp_ops = { .dump = tcf_simp_dump, .cleanup = tcf_simp_cleanup, .init = tcf_simp_init, - .walk = tcf_generic_walker, }; MODULE_AUTHOR("Jamal Hadi Salim(2005)"); diff --git 
a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index cb42211..35ea643 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -203,7 +203,6 @@ static struct tc_action_ops act_skbedit_ops = { .dump = tcf_skbedit_dump, .cleanup = tcf_skbedit_cleanup, .init = tcf_skbedit_init, - .walk = tcf_generic_walker, }; MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 0e1e38b..717b210 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1477,11 +1477,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, sch_tree_lock(sch); } + rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; + + ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; + + psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); + psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); + /* it used to be a nasty bug here, we have to check that node * is really leaf before changing cl->un.leaf ! */ if (!cl->level) { - cl->quantum = hopt->rate.rate / q->rate2quantum; + u64 quantum = cl->rate.rate_bytes_ps; + + do_div(quantum, q->rate2quantum); + cl->quantum = min_t(u64, quantum, INT_MAX); + if (!hopt->quantum && cl->quantum < 1000) { pr_warning( "HTB: quantum of class %X is small. Consider r2q change.\n", @@ -1500,13 +1511,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->prio = TC_HTB_NUMPRIO - 1; } - rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; - - ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; - - psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); - psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); - cl->buffer = PSCHED_TICKS2NS(hopt->buffer); cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index a609005..887e672 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -118,6 +118,32 @@ struct tbf_sched_data { }; +/* Time to Length, convert time in ns to length in bytes + * to determinate how many bytes can be sent in given time. 
+ */ +static u64 psched_ns_t2l(const struct psched_ratecfg *r, + u64 time_in_ns) +{ + /* The formula is : + * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC + */ + u64 len = time_in_ns * r->rate_bytes_ps; + + do_div(len, NSEC_PER_SEC); + + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) { + do_div(len, 53); + len = len * 48; + } + + if (len > r->overhead) + len -= r->overhead; + else + len = 0; + + return len; +} + /* * Return length of individual segments of a gso packet, * including all headers (MAC, IP, TCP/UDP) @@ -289,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) struct tbf_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_TBF_MAX + 1]; struct tc_tbf_qopt *qopt; - struct qdisc_rate_table *rtab = NULL; - struct qdisc_rate_table *ptab = NULL; struct Qdisc *child = NULL; - int max_size, n; + struct psched_ratecfg rate; + struct psched_ratecfg peak; + u64 max_size; + s64 buffer, mtu; u64 rate64 = 0, prate64 = 0; err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); @@ -304,38 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) goto done; qopt = nla_data(tb[TCA_TBF_PARMS]); - rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); - if (rtab == NULL) - goto done; - - if (qopt->peakrate.rate) { - if (qopt->peakrate.rate > qopt->rate.rate) - ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); - if (ptab == NULL) - goto done; - } - - for (n = 0; n < 256; n++) - if (rtab->data[n] > qopt->buffer) - break; - max_size = (n << qopt->rate.cell_log) - 1; - if (ptab) { - int size; - - for (n = 0; n < 256; n++) - if (ptab->data[n] > qopt->mtu) - break; - size = (n << qopt->peakrate.cell_log) - 1; - if (size < max_size) - max_size = size; - } - if (max_size < 0) - goto done; + if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE) + qdisc_put_rtab(qdisc_get_rtab(&qopt->rate, + tb[TCA_TBF_RTAB])); - if (max_size < psched_mtu(qdisc_dev(sch))) - pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n", - max_size, qdisc_dev(sch)->name, - psched_mtu(qdisc_dev(sch))); + if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE) + qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, + tb[TCA_TBF_PTAB])); if (q->qdisc != &noop_qdisc) { err = fifo_set_limit(q->qdisc, qopt->limit); @@ -349,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) } } + buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); + mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); + + if (tb[TCA_TBF_RATE64]) + rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); + psched_ratecfg_precompute(&rate, &qopt->rate, rate64); + + max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U); + + if (qopt->peakrate.rate) { + if (tb[TCA_TBF_PRATE64]) + prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); + psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64); + if (peak.rate_bytes_ps <= rate.rate_bytes_ps) { + pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n", + peak.rate_bytes_ps, rate.rate_bytes_ps); + err = -EINVAL; + goto done; + } + + max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu)); + } + + if (max_size < psched_mtu(qdisc_dev(sch))) + pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n", + max_size, qdisc_dev(sch)->name, + psched_mtu(qdisc_dev(sch))); + + if (!max_size) { + err = -EINVAL; + goto done; + } + sch_tree_lock(sch); if (child) { qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); @@ -362,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) q->tokens = q->buffer; 
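The new psched_ns_t2l() above derives the burst limit directly from the configured rate instead of walking the old rate table: len = time_in_ns * rate_bytes_ps / NSEC_PER_SEC, with a 48/53 correction when the link layer is ATM (48 payload bytes per 53-byte cell) and the per-packet overhead subtracted at the end. A rough userspace re-statement of the core conversion, just to make the arithmetic concrete (sketch only; the function and constant names are local to the example and the ATM/overhead corrections are omitted):

        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000ULL

        /* bytes that fit into time_ns at rate_bytes_ps, as in psched_ns_t2l() */
        static uint64_t ns_to_len(uint64_t rate_bytes_ps, uint64_t time_ns)
        {
                return time_ns * rate_bytes_ps / NSEC_PER_SEC;
        }

        int main(void)
        {
                /* 1 Gbit/s == 125,000,000 bytes/s, 1 ms worth of tokens */
                uint64_t len = ns_to_len(125000000ULL, 1000000ULL);

                printf("%llu bytes\n", (unsigned long long)len);  /* 125000 bytes */
                return 0;
        }

With these example numbers, max_size in the tbf_change() hunk above would come out at 125000 bytes, then be further limited by the peak-rate/mtu term when a peak rate is configured, and finally compared against the device MTU to decide whether the pr_warn_ratelimited() burst warning fires.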
q->ptokens = q->mtu; - if (tb[TCA_TBF_RATE64]) - rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); - psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64); - if (ptab) { - if (tb[TCA_TBF_PRATE64]) - prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); - psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64); + memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg)); + if (qopt->peakrate.rate) { + memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); q->peak_present = true; } else { q->peak_present = false; @@ -377,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) sch_tree_unlock(sch); err = 0; done: - if (rtab) - qdisc_put_rtab(rtab); - if (ptab) - qdisc_put_rtab(ptab); return err; } diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 68a27f9..31ed008 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -154,8 +154,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; - asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = - min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ; + asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) @@ -291,8 +290,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->peer.ipv6_address = 1; INIT_LIST_HEAD(&asoc->asocs); - asoc->autoclose = sp->autoclose; - asoc->default_stream = sp->default_stream; asoc->default_ppid = sp->default_ppid; asoc->default_flags = sp->default_flags; diff --git a/net/sctp/output.c b/net/sctp/output.c index 0e2644d..0fb140f 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -581,7 +581,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) unsigned long timeout; /* Restart the AUTOCLOSE timer when sending data. 
*/ - if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { + if (sctp_state(asoc, ESTABLISHED) && + asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 53c452e..5e68b94 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -38,6 +38,7 @@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> +MODULE_SOFTDEP("pre: sctp"); MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); MODULE_DESCRIPTION("SCTP snooper"); MODULE_LICENSE("GPL"); @@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = { .entry = jsctp_sf_eat_sack, }; +static __init int sctp_setup_jprobe(void) +{ + int ret = register_jprobe(&sctp_recv_probe); + + if (ret) { + if (request_module("sctp")) + goto out; + ret = register_jprobe(&sctp_recv_probe); + } + +out: + return ret; +} + static __init int sctpprobe_init(void) { int ret = -ENOMEM; @@ -202,7 +217,7 @@ static __init int sctpprobe_init(void) &sctpprobe_fops)) goto free_kfifo; - ret = register_jprobe(&sctp_recv_probe); + ret = sctp_setup_jprobe(); if (ret) goto remove_proc; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index dfe3f36..a26065b 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -820,7 +820,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); - if (new_asoc->autoclose) + if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); @@ -908,7 +908,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); - if (asoc->autoclose) + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); @@ -2970,7 +2970,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) force = SCTP_FORCE(); - if (asoc->autoclose) { + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); } @@ -3878,7 +3878,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, SCTP_CHUNK(chunk)); /* Count this as receiving DATA. 
*/ - if (asoc->autoclose) { + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); } @@ -5267,7 +5267,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); - if (asoc->autoclose) + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); @@ -5346,7 +5346,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); - if (asoc->autoclose) + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 72046b9..42b709c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2196,6 +2196,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); + struct net *net = sock_net(sk); /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) @@ -2205,6 +2206,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; + if (sp->autoclose > net->sctp.max_autoclose) + sp->autoclose = net->sctp.max_autoclose; + return 0; } @@ -2811,6 +2815,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; + unsigned long rto_min, rto_max; + struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; @@ -2824,26 +2830,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; + rto_max = rtoinfo.srto_max; + rto_min = rtoinfo.srto_min; + + if (rto_max) + rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; + else + rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; + + if (rto_min) + rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; + else + rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; + + if (rto_min > rto_max) + return -EINVAL; + if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); - if (rtoinfo.srto_max != 0) - asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); - if (rtoinfo.srto_min != 0) - asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min); + asoc->rto_max = rto_max; + asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. 
*/ - struct sctp_sock *sp = sctp_sk(sk); - if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; - if (rtoinfo.srto_max != 0) - sp->rtoinfo.srto_max = rtoinfo.srto_max; - if (rtoinfo.srto_min != 0) - sp->rtoinfo.srto_min = rtoinfo.srto_min; + sp->rtoinfo.srto_max = rto_max; + sp->rtoinfo.srto_min = rto_min; } return 0; diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 6b36561..b0565af 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -56,11 +56,16 @@ extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; extern int sysctl_sctp_wmem[3]; -static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, - int write, +static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, - loff_t *ppos); + static struct ctl_table sctp_table[] = { { .procname = "sctp_mem", @@ -102,17 +107,17 @@ static struct ctl_table sctp_net_table[] = { .data = &init_net.sctp.rto_min, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_sctp_do_rto_min, .extra1 = &one, - .extra2 = &timer_max + .extra2 = &init_net.sctp.rto_max }, { .procname = "rto_max", .data = &init_net.sctp.rto_max, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .proc_handler = proc_sctp_do_rto_max, + .extra1 = &init_net.sctp.rto_min, .extra2 = &timer_max }, { @@ -294,8 +299,7 @@ static struct ctl_table sctp_net_table[] = { { /* sentinel */ } }; -static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, - int write, +static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -342,6 +346,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, return ret; } +static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + int new_value; + struct ctl_table tbl; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; + int ret; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.rto_min; + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write) { + if (ret || new_value > max || new_value < min) + return -EINVAL; + net->sctp.rto_min = new_value; + } + return ret; +} + +static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + int new_value; + struct ctl_table tbl; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; + int ret; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.rto_max; + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write) { + if (ret || new_value > max || new_value < min) + return -EINVAL; + net->sctp.rto_max = new_value; + } + return ret; +} + int sctp_sysctl_net_register(struct net *net) { struct ctl_table *table; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index e332efb..efc46ff 100644 --- 
a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -573,7 +573,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t) u32 old_cwnd = t->cwnd; u32 max_burst_bytes; - if (t->burst_limited) + if (t->burst_limited || asoc->max_burst == 0) return; max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); diff --git a/net/tipc/core.c b/net/tipc/core.c index fd4eeea..c6d3f75 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -113,7 +113,6 @@ err: static void tipc_core_stop(void) { tipc_netlink_stop(); - tipc_handler_stop(); tipc_cfg_stop(); tipc_subscr_stop(); tipc_nametbl_stop(); @@ -146,9 +145,10 @@ static int tipc_core_start(void) res = tipc_subscr_start(); if (!res) res = tipc_cfg_init(); - if (res) + if (res) { + tipc_handler_stop(); tipc_core_stop(); - + } return res; } @@ -178,6 +178,7 @@ static int __init tipc_init(void) static void __exit tipc_exit(void) { + tipc_handler_stop(); tipc_core_stop_net(); tipc_core_stop(); pr_info("Deactivated\n"); diff --git a/net/tipc/handler.c b/net/tipc/handler.c index b36f0fc..e4bc8a2 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument) { struct queue_item *item; + spin_lock_bh(&qitem_lock); if (!handler_enabled) { pr_err("Signal request ignored by handler\n"); + spin_unlock_bh(&qitem_lock); return -ENOPROTOOPT; } - spin_lock_bh(&qitem_lock); item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); if (!item) { pr_err("Signal queue out of memory\n"); @@ -112,10 +113,14 @@ void tipc_handler_stop(void) struct list_head *l, *n; struct queue_item *item; - if (!handler_enabled) + spin_lock_bh(&qitem_lock); + if (!handler_enabled) { + spin_unlock_bh(&qitem_lock); return; - + } handler_enabled = 0; + spin_unlock_bh(&qitem_lock); + tasklet_kill(&tipc_tasklet); spin_lock_bh(&qitem_lock); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 01625cc..a427623 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t, int); -static void unix_set_peek_off(struct sock *sk, int val) +static int unix_set_peek_off(struct sock *sk, int val) { struct unix_sock *u = unix_sk(sk); - mutex_lock(&u->readlock); + if (mutex_lock_interruptible(&u->readlock)) + return -EINTR; + sk->sk_peek_off = val; mutex_unlock(&u->readlock); + + return 0; } @@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock) int err; unsigned int retries = 0; - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) + return err; err = 0; if (u->addr) @@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; addr_len = err; - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) + goto out; err = -EINVAL; if (u->addr) diff --git a/net/wireless/core.c b/net/wireless/core.c index aff959e..52b865f 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -451,6 +451,15 @@ int wiphy_register(struct wiphy *wiphy) int i; u16 ifmodes = wiphy->interface_modes; + /* support for 5/10 MHz is broken due to nl80211 API mess - disable */ + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ; + + /* + * There are major locking problems in nl80211/mac80211 for CSA, + * disable for all drivers until this has been reworked. 
+ */ + wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH; + #ifdef CONFIG_PM if (WARN_ON(wiphy->wowlan && (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 9d797df..89737ee 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -262,7 +262,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, /* try to find an IBSS channel if none requested ... */ if (!wdev->wext.ibss.chandef.chan) { - wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + struct ieee80211_channel *new_chan = NULL; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { struct ieee80211_supported_band *sband; @@ -278,18 +278,19 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, continue; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; - wdev->wext.ibss.chandef.chan = chan; - wdev->wext.ibss.chandef.center_freq1 = - chan->center_freq; + new_chan = chan; break; } - if (wdev->wext.ibss.chandef.chan) + if (new_chan) break; } - if (!wdev->wext.ibss.chandef.chan) + if (!new_chan) return -EINVAL; + + cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan, + NL80211_CHAN_NO_HT); } /* don't join -- SSID is not there */ @@ -363,9 +364,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, return err; if (chan) { - wdev->wext.ibss.chandef.chan = chan; - wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; - wdev->wext.ibss.chandef.center_freq1 = freq; + cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan, + NL80211_CHAN_NO_HT); wdev->wext.ibss.channel_fixed = true; } else { /* cfg80211_ibss_wext_join will pick one if needed */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a1eb210..138dc3b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2687,7 +2687,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_NEW_KEY); if (!hdr) - return -ENOBUFS; + goto nla_put_failure; cookie.msg = msg; cookie.idx = key_idx; @@ -5349,6 +5349,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) err = -EINVAL; goto out_free; } + + if (!wiphy->bands[band]) + continue; + err = ieee80211_get_ratemask(wiphy->bands[band], nla_data(attr), nla_len(attr), @@ -9633,8 +9637,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg, nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) goto nla_put_failure; - if (req->flags) - nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags); + if (req->flags && + nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) + goto nla_put_failure; return 0; nla_put_failure: @@ -11093,6 +11098,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct nlattr *reasons; reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); + if (!reasons) + goto free_msg; if (wakeup->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) @@ -11118,16 +11125,18 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, wakeup->pattern_idx)) goto free_msg; - if (wakeup->tcp_match) - nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH); + if (wakeup->tcp_match && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH)) + goto free_msg; - if (wakeup->tcp_connlost) - nla_put_flag(msg, - NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST); + if (wakeup->tcp_connlost && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST)) + goto free_msg; - if (wakeup->tcp_nomoretokens) - nla_put_flag(msg, - 
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS); + if (wakeup->tcp_nomoretokens && + nla_put_flag(msg, + NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS)) + goto free_msg; if (wakeup->packet) { u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; @@ -11263,24 +11272,29 @@ void cfg80211_ft_event(struct net_device *netdev, return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); - if (!hdr) { - nlmsg_free(msg); - return; - } + if (!hdr) + goto out; - nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap); - if (ft_event->ies) - nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies); - if (ft_event->ric_ies) - nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, - ft_event->ric_ies); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap)) + goto out; + + if (ft_event->ies && + nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies)) + goto out; + if (ft_event->ric_ies && + nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, + ft_event->ric_ies)) + goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); + return; + out: + nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_ft_event); diff --git a/scripts/sortextable.c b/scripts/sortextable.c index 5f7a8b6..7941fbd 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c @@ -31,6 +31,10 @@ #include <tools/be_byteshift.h> #include <tools/le_byteshift.h> +#ifndef EM_ARCOMPACT +#define EM_ARCOMPACT 93 +#endif + #ifndef EM_AARCH64 #define EM_AARCH64 183 #endif @@ -268,6 +272,7 @@ do_file(char const *const fname) case EM_S390: custom_sort = sort_relative_table; break; + case EM_ARCOMPACT: case EM_ARM: case EM_AARCH64: case EM_MIPS: diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 7f44c32..8137b27 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -70,7 +70,7 @@ int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep) * * TODO: Encrypt the stored data with a temporary key. 
*/ - file = shmem_file_setup("", datalen, 0); + file = shmem_kernel_file_setup("", datalen, 0); if (IS_ERR(file)) { ret = PTR_ERR(file); goto err_quota; diff --git a/security/keys/key.c b/security/keys/key.c index 55d110f..6e21c11 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -272,7 +272,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, } /* allocate and initialise the key and its description */ - key = kmem_cache_alloc(key_jar, GFP_KERNEL); + key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; @@ -293,18 +293,12 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->uid = uid; key->gid = gid; key->perm = perm; - key->flags = 0; - key->expiry = 0; - key->payload.data = NULL; - key->security = NULL; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_TRUSTED) key->flags |= 1 << KEY_FLAG_TRUSTED; - memset(&key->type_data, 0, sizeof(key->type_data)); - #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 69f0cb7..d46cbc5 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -160,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y) static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; - const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK; + const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; @@ -194,10 +194,10 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. */ - if (index_key->type != &key_type_keyring && (hash & level_mask) == 0) + if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; - if (index_key->type == &key_type_keyring && (hash & level_mask) != 0) - return (hash + (hash << level_shift)) & ~level_mask; + if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) + return (hash + (hash << level_shift)) & ~fan_mask; return hash; } @@ -279,12 +279,11 @@ static bool keyring_compare_object(const void *object, const void *data) * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. 
*/ -static int keyring_diff_objects(const void *_a, const void *_b) +static int keyring_diff_objects(const void *object, const void *data) { - const struct key *key_a = keyring_ptr_to_key(_a); - const struct key *key_b = keyring_ptr_to_key(_b); + const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; - const struct keyring_index_key *b = &key_b->index_key; + const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; @@ -691,8 +690,8 @@ descend_to_node: smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); - node = assoc_array_ptr_to_node(ptr); } + node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 794c3ca..419491d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -53,6 +53,7 @@ #include <net/ip.h> /* for local_port_range[] */ #include <net/sock.h> #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ +#include <net/inet_connection_sock.h> #include <net/net_namespace.h> #include <net/netlabel.h> #include <linux/uaccess.h> @@ -95,10 +96,6 @@ #include "audit.h" #include "avc_ss.h" -#define SB_TYPE_FMT "%s%s%s" -#define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0]) -#define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : "" - extern struct security_operations *security_ops; /* SECMARK reference count */ @@ -413,8 +410,8 @@ static int sb_finish_set_opts(struct super_block *sb) the first boot of the SELinux kernel before we have assigned xattr values to the filesystem. */ if (!root_inode->i_op->getxattr) { - printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no " - "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb)); + printk(KERN_WARNING "SELinux: (dev %s, type %s) has no " + "xattr support\n", sb->s_id, sb->s_type->name); rc = -EOPNOTSUPP; goto out; } @@ -422,22 +419,22 @@ static int sb_finish_set_opts(struct super_block *sb) if (rc < 0 && rc != -ENODATA) { if (rc == -EOPNOTSUPP) printk(KERN_WARNING "SELinux: (dev %s, type " - SB_TYPE_FMT") has no security xattr handler\n", - sb->s_id, SB_TYPE_ARGS(sb)); + "%s) has no security xattr handler\n", + sb->s_id, sb->s_type->name); else printk(KERN_WARNING "SELinux: (dev %s, type " - SB_TYPE_FMT") getxattr errno %d\n", sb->s_id, - SB_TYPE_ARGS(sb), -rc); + "%s) getxattr errno %d\n", sb->s_id, + sb->s_type->name, -rc); goto out; } } if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n", - sb->s_id, SB_TYPE_ARGS(sb)); + printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", + sb->s_id, sb->s_type->name); else - printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n", - sb->s_id, SB_TYPE_ARGS(sb), + printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n", + sb->s_id, sb->s_type->name, labeling_behaviors[sbsec->behavior-1]); sbsec->flags |= SE_SBINITIALIZED; @@ -600,6 +597,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, const struct cred *cred = current_cred(); int rc = 0, i; struct superblock_security_struct *sbsec = sb->s_security; + const char *name = sb->s_type->name; struct inode *inode = sbsec->sb->s_root->d_inode; struct inode_security_struct *root_isec = inode->i_security; u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; @@ -658,8 +656,8 @@ static int 
selinux_set_mnt_opts(struct super_block *sb, strlen(mount_options[i]), &sid); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" - "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", - mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); + "(%s) failed for (dev %s, type %s) errno=%d\n", + mount_options[i], sb->s_id, name, rc); goto out; } switch (flags[i]) { @@ -806,8 +804,7 @@ out: out_double_mount: rc = -EINVAL; printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different " - "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, - SB_TYPE_ARGS(sb)); + "security settings for (dev %s, type %s)\n", sb->s_id, name); goto out; } @@ -2480,8 +2477,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data) rc = security_context_to_sid(mount_options[i], len, &sid); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" - "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", - mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); + "(%s) failed for (dev %s, type %s) errno=%d\n", + mount_options[i], sb->s_id, sb->s_type->name, rc); goto out_free_opts; } rc = -EINVAL; @@ -2519,8 +2516,8 @@ out_free_secdata: return rc; out_bad_option: printk(KERN_WARNING "SELinux: unable to change security options " - "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, - SB_TYPE_ARGS(sb)); + "during remount (dev %s, type=%s)\n", sb->s_id, + sb->s_type->name); goto out_free_opts; } @@ -3828,7 +3825,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) u32 nlbl_sid; u32 nlbl_type; - err = selinux_skb_xfrm_sid(skb, &xfrm_sid); + err = selinux_xfrm_skb_sid(skb, &xfrm_sid); if (unlikely(err)) return -EACCES; err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); @@ -3846,6 +3843,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) return 0; } +/** + * selinux_conn_sid - Determine the child socket label for a connection + * @sk_sid: the parent socket's SID + * @skb_sid: the packet's SID + * @conn_sid: the resulting connection SID + * + * If @skb_sid is valid then the user:role:type information from @sk_sid is + * combined with the MLS information from @skb_sid in order to create + * @conn_sid. If @skb_sid is not valid then then @conn_sid is simply a copy + * of @sk_sid. Returns zero on success, negative values on failure. 
+ * + */ +static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid) +{ + int err = 0; + + if (skb_sid != SECSID_NULL) + err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid); + else + *conn_sid = sk_sid; + + return err; +} + /* socket security operations */ static int socket_sockcreate_sid(const struct task_security_struct *tsec, @@ -4452,7 +4473,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct sk_security_struct *sksec = sk->sk_security; int err; u16 family = sk->sk_family; - u32 newsid; + u32 connsid; u32 peersid; /* handle mapped IPv4 packets arriving via IPv6 sockets */ @@ -4462,16 +4483,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb, err = selinux_skb_peerlbl_sid(skb, family, &peersid); if (err) return err; - if (peersid == SECSID_NULL) { - req->secid = sksec->sid; - req->peer_secid = SECSID_NULL; - } else { - err = security_sid_mls_copy(sksec->sid, peersid, &newsid); - if (err) - return err; - req->secid = newsid; - req->peer_secid = peersid; - } + err = selinux_conn_sid(sksec->sid, peersid, &connsid); + if (err) + return err; + req->secid = connsid; + req->peer_secid = peersid; return selinux_netlbl_inet_conn_request(req, family); } @@ -4731,6 +4747,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops, static unsigned int selinux_ip_output(struct sk_buff *skb, u16 family) { + struct sock *sk; u32 sid; if (!netlbl_enabled()) @@ -4739,8 +4756,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb, /* we do this in the LOCAL_OUT path and not the POST_ROUTING path * because we want to make sure we apply the necessary labeling * before IPsec is applied so we can leverage AH protection */ - if (skb->sk) { - struct sk_security_struct *sksec = skb->sk->sk_security; + sk = skb->sk; + if (sk) { + struct sk_security_struct *sksec; + + if (sk->sk_state == TCP_LISTEN) + /* if the socket is the listening state then this + * packet is a SYN-ACK packet which means it needs to + * be labeled based on the connection/request_sock and + * not the parent socket. unfortunately, we can't + * lookup the request_sock yet as it isn't queued on + * the parent socket until after the SYN-ACK is sent. + * the "solution" is to simply pass the packet as-is + * as any IP option based labeling should be copied + * from the initial connection request (in the IP + * layer). it is far from ideal, but until we get a + * security label in the packet itself this is the + * best we can do. */ + return NF_ACCEPT; + + /* standard practice, label using the parent socket */ + sksec = sk->sk_security; sid = sksec->sid; } else sid = SECINITSID_KERNEL; @@ -4810,27 +4846,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, * as fast and as clean as possible. */ if (!selinux_policycap_netpeer) return selinux_ip_postroute_compat(skb, ifindex, family); + + secmark_active = selinux_secmark_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); + if (!secmark_active && !peerlbl_active) + return NF_ACCEPT; + + sk = skb->sk; + #ifdef CONFIG_XFRM /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec * packet transformation so allow the packet to pass without any checks * since we'll have another chance to perform access control checks * when the packet is on it's final way out. * NOTE: there appear to be some IPv6 multicast cases where skb->dst - * is NULL, in this case go ahead and apply access control. 
*/ - if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL) + * is NULL, in this case go ahead and apply access control. + * NOTE: if this is a local socket (skb->sk != NULL) that is in the + * TCP listening state we cannot wait until the XFRM processing + * is done as we will miss out on the SA label if we do; + * unfortunately, this means more work, but it is only once per + * connection. */ + if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL && + !(sk != NULL && sk->sk_state == TCP_LISTEN)) return NF_ACCEPT; #endif - secmark_active = selinux_secmark_enabled(); - peerlbl_active = selinux_peerlbl_enabled(); - if (!secmark_active && !peerlbl_active) - return NF_ACCEPT; - /* if the packet is being forwarded then get the peer label from the - * packet itself; otherwise check to see if it is from a local - * application or the kernel, if from an application get the peer label - * from the sending socket, otherwise use the kernel's sid */ - sk = skb->sk; if (sk == NULL) { + /* Without an associated socket the packet is either coming + * from the kernel or it is being forwarded; check the packet + * to determine which and if the packet is being forwarded + * query the packet directly to determine the security label. */ if (skb->skb_iif) { secmark_perm = PACKET__FORWARD_OUT; if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) @@ -4839,7 +4884,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, secmark_perm = PACKET__SEND; peer_sid = SECINITSID_KERNEL; } + } else if (sk->sk_state == TCP_LISTEN) { + /* Locally generated packet but the associated socket is in the + * listening state which means this is a SYN-ACK packet. In + * this particular case the correct security label is assigned + * to the connection/request_sock but unfortunately we can't + * query the request_sock as it isn't queued on the parent + * socket until after the SYN-ACK packet is sent; the only + * viable choice is to regenerate the label like we do in + * selinux_inet_conn_request(). See also selinux_ip_output() + * for similar problems. */ + u32 skb_sid; + struct sk_security_struct *sksec = sk->sk_security; + if (selinux_skb_peerlbl_sid(skb, family, &skb_sid)) + return NF_DROP; + /* At this point, if the returned skb peerlbl is SECSID_NULL + * and the packet has been through at least one XFRM + * transformation then we must be dealing with the "final" + * form of labeled IPsec packet; since we've already applied + * all of our access controls on this packet we can safely + * pass the packet. */ + if (skb_sid == SECSID_NULL) { + switch (family) { + case PF_INET: + if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) + return NF_ACCEPT; + break; + case PF_INET6: + if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) + return NF_ACCEPT; + default: + return NF_DROP_ERR(-ECONNREFUSED); + } + } + if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid)) + return NF_DROP; + secmark_perm = PACKET__SEND; } else { + /* Locally generated packet, fetch the security label from the + * associated socket. 
*/ struct sk_security_struct *sksec = sk->sk_security; peer_sid = sksec->sid; secmark_perm = PACKET__SEND; diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 0dec76c..48c3cc9 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -39,6 +39,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, struct common_audit_data *ad, u8 proto); int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid); static inline void selinux_xfrm_notify_policyload(void) { @@ -79,11 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, static inline void selinux_xfrm_notify_policyload(void) { } -#endif -static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) +static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) { - return selinux_xfrm_decode_session(skb, sid, 0); + *sid = SECSID_NULL; + return 0; } +#endif #endif /* _SELINUX_XFRM_H_ */ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index ee470a0..d106733 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2334,50 +2334,16 @@ int security_fs_use(struct super_block *sb) struct ocontext *c; struct superblock_security_struct *sbsec = sb->s_security; const char *fstype = sb->s_type->name; - const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL; - struct ocontext *base = NULL; read_lock(&policy_rwlock); - for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) { - char *sub; - int baselen; - - baselen = strlen(fstype); - - /* if base does not match, this is not the one */ - if (strncmp(fstype, c->u.name, baselen)) - continue; - - /* if there is no subtype, this is the one! */ - if (!subtype) - break; - - /* skip past the base in this entry */ - sub = c->u.name + baselen; - - /* entry is only a base. save it. keep looking for subtype */ - if (sub[0] == '\0') { - base = c; - continue; - } - - /* entry is not followed by a subtype, so it is not a match */ - if (sub[0] != '.') - continue; - - /* whew, we found a subtype of this fstype */ - sub++; /* move past '.' */ - - /* exact match of fstype AND subtype */ - if (!strcmp(subtype, sub)) + c = policydb.ocontexts[OCON_FSUSE]; + while (c) { + if (strcmp(fstype, c->u.name) == 0) break; + c = c->next; } - /* in case we had found an fstype match but no subtype match */ - if (!c) - c = base; - if (c) { sbsec->behavior = c->v.behavior; if (!c->sid[0]) { diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index a91d205..0462cb3 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -209,19 +209,26 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, NULL) ? 0 : 1); } -/* - * LSM hook implementation that checks and/or returns the xfrm sid for the - * incoming packet. 
- */ -int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) +static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb) { - u32 sid_session = SECSID_NULL; - struct sec_path *sp; + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x; - if (skb == NULL) - goto out; + if (dst == NULL) + return SECSID_NULL; + x = dst->xfrm; + if (x == NULL || !selinux_authorizable_xfrm(x)) + return SECSID_NULL; + + return x->security->ctx_sid; +} + +static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb, + u32 *sid, int ckall) +{ + u32 sid_session = SECSID_NULL; + struct sec_path *sp = skb->sp; - sp = skb->sp; if (sp) { int i; @@ -248,6 +255,30 @@ out: } /* + * LSM hook implementation that checks and/or returns the xfrm sid for the + * incoming packet. + */ +int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) +{ + if (skb == NULL) { + *sid = SECSID_NULL; + return 0; + } + return selinux_xfrm_skb_sid_ingress(skb, sid, ckall); +} + +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) +{ + int rc; + + rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0); + if (rc == 0 && *sid == SECSID_NULL) + *sid = selinux_xfrm_skb_sid_egress(skb); + + return rc; +} + +/* * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. */ int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, @@ -327,19 +358,22 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, return rc; ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); - if (!ctx) - return -ENOMEM; + if (!ctx) { + rc = -ENOMEM; + goto out; + } ctx->ctx_doi = XFRM_SC_DOI_LSM; ctx->ctx_alg = XFRM_SC_ALG_SELINUX; ctx->ctx_sid = secid; ctx->ctx_len = str_len; memcpy(ctx->ctx_str, ctx_str, str_len); - kfree(ctx_str); x->security = ctx; atomic_inc(&selinux_xfrm_refcount); - return 0; +out: + kfree(ctx_str); + return rc; } /* diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6e03b46..a210467 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, case SNDRV_PCM_STATE_DISCONNECTED: err = -EBADFD; goto _endloop; + case SNDRV_PCM_STATE_PAUSED: + continue; } if (!tout) { snd_printd("%s write error (DMA or IRQ trouble?)\n", diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index c4671d0..c7f6d1c 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx) memset(path, 0, sizeof(*path)); } +/* return a DAC if paired to the given pin by codec driver */ +static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin) +{ + struct hda_gen_spec *spec = codec->spec; + const hda_nid_t *list = spec->preferred_dacs; + + if (!list) + return 0; + for (; *list; list += 2) + if (*list == pin) + return list[1]; + return 0; +} + /* look for an empty DAC slot */ static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, bool is_digital) @@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs, continue; } - dacs[i] = look_for_dac(codec, pin, false); + dacs[i] = get_preferred_dac(codec, pin); + if (dacs[i]) { + if (is_dac_already_used(codec, dacs[i])) + badness += bad->shared_primary; + } + + if (!dacs[i]) + dacs[i] = look_for_dac(codec, pin, false); if (!dacs[i] && !i) { /* try to steal the DAC of surrounds for the front */ for (j = 1; j < num_outs; j++) { @@ -4297,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec 
*codec, return AC_PWRST_D3; } +/* mute all aamix inputs initially; parse up to the first leaves */ +static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) +{ + int i, nums; + const hda_nid_t *conn; + bool has_amp; + + nums = snd_hda_get_conn_list(codec, mix, &conn); + has_amp = nid_has_mute(codec, mix, HDA_INPUT); + for (i = 0; i < nums; i++) { + if (has_amp) + snd_hda_codec_amp_stereo(codec, mix, + HDA_INPUT, i, + 0xff, HDA_AMP_MUTE); + else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) + snd_hda_codec_amp_stereo(codec, conn[i], + HDA_OUTPUT, 0, + 0xff, HDA_AMP_MUTE); + } +} /* * Parse the given BIOS configuration and set up the hda_gen_spec @@ -4435,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, } } + /* mute all aamix input initially */ + if (spec->mixer_nid) + mute_all_mixer_nid(codec, spec->mixer_nid); + dig_only: parse_digital(codec); diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 7e45cb4..0929a06 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -249,6 +249,9 @@ struct hda_gen_spec { const struct badness_table *main_out_badness; const struct badness_table *extra_out_badness; + /* preferred pin/DAC pairs; an array of paired NIDs */ + const hda_nid_t *preferred_dacs; + /* loopback mixing mode */ bool aamix_mode; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 27aa140..956871d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev) * white/black-list for enable_msi */ static struct snd_pci_quirk msi_black_list[] = { + SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index cac015b..699262a 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -340,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec) { int err; struct ad198x_spec *spec; + static hda_nid_t preferred_pairs[] = { + 0x1a, 0x03, + 0x1b, 0x03, + 0x1c, 0x04, + 0x1d, 0x05, + 0x1e, 0x03, + 0 + }; err = alloc_ad_spec(codec); if (err < 0) @@ -360,6 +368,8 @@ static int patch_ad1986a(struct hda_codec *codec) * So, let's disable the shared stream. 
*/ spec->gen.multiout.no_share_stream = 1; + /* give fixed DAC/pin pairs */ + spec->gen.preferred_dacs = preferred_pairs; /* AD1986A can't manage the dynamic pin on/off smoothly */ spec->gen.auto_mute_via_amp = 1; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 1f2717f..3fbf288 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), - SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index c4a66ef..f281c80 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2337,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec) int err; per_cvt = get_cvt(spec, 0); - err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid, - per_cvt->cvt_nid); + err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid, + per_cvt->cvt_nid, + HDA_PCM_TYPE_HDMI); if (err < 0) return err; return simple_hdmi_build_jack(codec, 0); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c5ea483..c564694 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3849,6 +3849,7 @@ enum { ALC269_FIXUP_ASUS_X101, ALC271_FIXUP_AMIC_MIC2, ALC271_FIXUP_HP_GATE_MIC_JACK, + ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572, ALC269_FIXUP_ACER_AC700, ALC269_FIXUP_LIMIT_INT_MIC_BOOST, ALC269VB_FIXUP_ASUS_ZENBOOK, @@ -4111,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC271_FIXUP_AMIC_MIC2, }, + [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK, + }, [ALC269_FIXUP_ACER_AC700] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -4208,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), @@ -4239,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x061f, 
"Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -5034,8 +5046,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 8697ced..1ead3c9 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, dma_params = ssc_p->dma_params[dir]; - ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); pr_debug("%s enabled SSC_SR=0x%08x\n", @@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, return 0; } +static int atmel_ssc_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct atmel_ssc_info *ssc_p = &ssc_info[dai->id]; + struct atmel_pcm_dma_params *dma_params; + int dir; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + dir = 0; + else + dir = 1; + + dma_params = ssc_p->dma_params[dir]; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); + break; + default: + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); + break; + } + + return 0; +} #ifdef CONFIG_PM static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) @@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = { .startup = atmel_ssc_startup, .shutdown = atmel_ssc_shutdown, .prepare = atmel_ssc_prepare, + .trigger = atmel_ssc_trigger, .hw_params = atmel_ssc_hw_params, .set_fmt = atmel_ssc_set_dai_fmt, .set_clkdiv = atmel_ssc_set_dai_clkdiv, diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 1b37228..7d6a905 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c @@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) dai->stream_name = "WM8731 PCM"; 
dai->codec_dai_name = "wm8731-hifi"; dai->init = sam9x5_wm8731_init; - dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF + dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM; ret = snd_soc_of_parse_card_name(card, "atmel,model"); diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 99b359e..0ab2dc2 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "AEC Loopback", "HPOUT3L", "OUT3L" }, { "AEC Loopback", "HPOUT3R", "OUT3R" }, { "HPOUT3L", NULL, "OUT3L" }, - { "HPOUT3R", NULL, "OUT3L" }, + { "HPOUT3R", NULL, "OUT3R" }, { "AEC Loopback", "SPKOUTL", "OUT4L" }, { "SPKOUTLN", NULL, "OUT4L" }, diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 3938fb1..53bbfac 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_B: - aif1 |= WM8904_AIF_LRCLK_INV; + aif1 |= 0x3 | WM8904_AIF_LRCLK_INV; case SND_SOC_DAIFMT_DSP_A: aif1 |= 0x3; break; diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 543c5c2..0f17ed3 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec) snd_soc_update_bits(codec, WM8962_CLOCKING_4, WM8962_SYSCLK_RATE_MASK, clocking4); + /* DSPCLK_DIV can be only generated correctly after enabling SYSCLK. + * So we here provisionally enable it and then disable it afterward + * if current bias_level hasn't reached SND_SOC_BIAS_ON. + */ + if (codec->dapm.bias_level != SND_SOC_BIAS_ON) + snd_soc_update_bits(codec, WM8962_CLOCKING2, + WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA); + dspclk = snd_soc_read(codec, WM8962_CLOCKING1); + + if (codec->dapm.bias_level != SND_SOC_BIAS_ON) + snd_soc_update_bits(codec, WM8962_CLOCKING2, + WM8962_SYSCLK_ENA_MASK, 0); + if (dspclk < 0) { dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); return; diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 46ec0e9..4fbcab6 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp) return ret; /* Wait for the RAM to start, should be near instantaneous */ - count = 0; - do { + for (count = 0; count < 10; ++count) { ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val); if (ret != 0) return ret; - } while (!(val & ADSP2_RAM_RDY) && ++count < 10); + + if (val & ADSP2_RAM_RDY) + break; + + msleep(1); + } if (!(val & ADSP2_RAM_RDY)) { adsp_err(dsp, "Failed to start DSP RAM\n"); diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 61e4885..3fd76bc 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c @@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card, break; } - dapm->bias_level = level; - return 0; } diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 0b18f65..3920a5e 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { .playback = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + 
.rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_I2S_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_I2S_FORMATS, }, .ops = &kirkwood_i2s_dai_ops, @@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { .playback = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_SPDIF_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_SPDIF_FORMATS, }, .ops = &kirkwood_i2s_dai_ops, diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index cbc9c96..41949af 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, } } +static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) +{ + unsigned int i; + + for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; + i++) { + if (!pcm->chan[i]) + continue; + dma_release_channel(pcm->chan[i]); + if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) + break; + } +} + /** * snd_dmaengine_pcm_register - Register a dmaengine based PCM device * @dev: The parent device for the PCM device @@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { struct dmaengine_pcm *pcm; + int ret; pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); if (!pcm) @@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev, dmaengine_pcm_request_chan_of(pcm, dev); if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) - return snd_soc_add_platform(dev, &pcm->platform, + ret = snd_soc_add_platform(dev, &pcm->platform, &dmaengine_no_residue_pcm_platform); else - return snd_soc_add_platform(dev, &pcm->platform, + ret = snd_soc_add_platform(dev, &pcm->platform, &dmaengine_pcm_platform); + if (ret) + goto err_free_dma; + + return 0; + +err_free_dma: + dmaengine_pcm_release_chan(pcm); + kfree(pcm); + return ret; } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); @@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev) { struct snd_soc_platform *platform; struct dmaengine_pcm *pcm; - unsigned int i; platform = snd_soc_lookup_platform(dev); if (!platform) @@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev) pcm = soc_platform_to_pcm(platform); - for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) { - if (pcm->chan[i]) { - dma_release_channel(pcm->chan[i]); - if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) - break; - } - } - snd_soc_remove_platform(platform); + dmaengine_pcm_release_chan(pcm); kfree(pcm); } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 11a90cd..891b9a9 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) struct snd_soc_platform *platform = rtd->platform; struct snd_soc_dai *cpu_dai = 
rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_codec *codec = rtd->codec; + bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); /* apply codec digital mute */ - if (!codec->active) + if ((playback && codec_dai->playback_active == 1) || + (!playback && codec_dai->capture_active == 1)) snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); /* free any machine hw params */ diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c index 364bf6a9..8c819f8 100644 --- a/sound/soc/tegra/tegra20_i2s.c +++ b/sound/soc/tegra/tegra20_i2s.c @@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: @@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, return -EINVAL; } - mask = TEGRA20_I2S_CTRL_MASTER_ENABLE; + mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: - val = TEGRA20_I2S_CTRL_MASTER_ENABLE; + val |= TEGRA20_I2S_CTRL_MASTER_ENABLE; break; case SND_SOC_DAIFMT_CBM_CFM: break; diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c index 08bc693..8c7c102 100644 --- a/sound/soc/tegra/tegra20_spdif.c +++ b/sound/soc/tegra/tegra20_spdif.c @@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream, { struct device *dev = dai->dev; struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; int ret, spdifclock; - mask = TEGRA20_SPDIF_CTRL_PACK | - TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; + mask |= TEGRA20_SPDIF_CTRL_PACK | + TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: - val = TEGRA20_SPDIF_CTRL_PACK | - TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; + val |= TEGRA20_SPDIF_CTRL_PACK | + TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; break; default: return -EINVAL; diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 231a785..02247fe 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: @@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, return -EINVAL; } - mask = TEGRA30_I2S_CTRL_MASTER_ENABLE; + mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: - val = TEGRA30_I2S_CTRL_MASTER_ENABLE; + val |= TEGRA30_I2S_CTRL_MASTER_ENABLE; break; case SND_SOC_DAIFMT_CBM_CFM: break; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 3454262..f4b12c2 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer) return err; } - return err; + return 0; } int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index dc4de37..bcf1d2f 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ 
b/tools/power/cpupower/utils/cpupower-set.c @@ -18,9 +18,9 @@ #include "helpers/bitmask.h" static struct option set_opts[] = { - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, + { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, + { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'}, + { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'}, { }, }; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a0aa84b..4f588bc 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1898,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) int r; struct kvm_vcpu *vcpu, *v; + if (id >= KVM_MAX_VCPUS) + return -EINVAL; + vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu);
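
The SELinux hunks above rework how outbound packets are labeled when the sending socket is a TCP listener (the SYN-ACK case): with no socket the label comes from the kernel or the forwarded packet, with a listening socket the connection label is rebuilt from the parent socket's identity plus the packet's MLS component, and otherwise the sending socket's label is used. The sketch below is a standalone userspace model of that decision only; every type and helper here (sock_model, mls_copy, peer_label) is an invented stand-in, and mls_copy() merely mimics the role of security_sid_mls_copy(), so this is illustrative rather than the kernel implementation.

    /* Userspace model of the peer-label selection added to
     * selinux_ip_postroute(); all names below are invented stand-ins. */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t u32;

    #define SECSID_NULL       0u
    #define SECINITSID_KERNEL 1u

    struct sock_model {
        bool present;    /* models skb->sk != NULL            */
        bool listening;  /* models sk->sk_state == TCP_LISTEN */
        u32  sk_sid;     /* label of the sending socket       */
    };

    /* stand-in for security_sid_mls_copy(): keep the socket's identity
     * bits, take the MLS bits from the packet (purely illustrative) */
    static u32 mls_copy(u32 sk_sid, u32 skb_sid)
    {
        return (sk_sid & 0xffff0000u) | (skb_sid & 0x0000ffffu);
    }

    static u32 peer_label(const struct sock_model *sk, u32 skb_sid,
                          bool forwarded)
    {
        if (!sk->present)                 /* kernel-generated or forwarded */
            return forwarded ? skb_sid : SECINITSID_KERNEL;
        if (sk->listening && skb_sid != SECSID_NULL)
            return mls_copy(sk->sk_sid, skb_sid);  /* SYN-ACK case */
        return sk->sk_sid;                /* normal locally generated case */
    }

    int main(void)
    {
        struct sock_model listener = { true, true, 0x00030000u };

        printf("SYN-ACK peer label: 0x%08x\n",
               (unsigned)peer_label(&listener, 0x00000007u, false));
        return 0;
    }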
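
The hda_generic change introduces a preferred pin/DAC table: a flat, zero-terminated array of {pin NID, DAC NID} pairs that the parser consults before falling back to look_for_dac(), with patch_analog.c filling the table in for the AD1986A. Below is a minimal self-contained sketch of that lookup; it takes the pair list directly rather than a codec pointer, and the pair values simply mirror the AD1986A table added above, so it is a simplified model rather than the driver code.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t hda_nid_t;

    /* walk {pin, dac} pairs until the 0 terminator; return the paired DAC
     * or 0 if the pin has no preferred DAC */
    static hda_nid_t get_preferred_dac(const hda_nid_t *list, hda_nid_t pin)
    {
        if (!list)
            return 0;
        for (; *list; list += 2)
            if (*list == pin)
                return list[1];
        return 0;
    }

    int main(void)
    {
        /* pairs mirroring the AD1986A preferred_pairs table */
        static const hda_nid_t preferred_pairs[] = {
            0x1a, 0x03,
            0x1b, 0x03,
            0x1c, 0x04,
            0x1d, 0x05,
            0x1e, 0x03,
            0
        };

        printf("pin 0x1b -> DAC 0x%02x\n",
               (unsigned)get_preferred_dac(preferred_pairs, 0x1b));
        return 0;
    }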