From 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 26 Jun 2015 03:28:24 +0200 Subject: xen/gntdevt: Fix race condition in gntdev_release() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While gntdev_release() is called the MMU notifier is still registered and can traverse priv->maps list even if no pages are mapped (which is the case -- gntdev_release() is called after all). But gntdev_release() will clear that list, so make sure that only one of those things happens at the same time. Signed-off-by: Marek Marczykowski-Górecki Cc: Signed-off-by: David Vrabel diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 67b9163..0dbb222 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip) pr_debug("priv %p\n", priv); + mutex_lock(&priv->lock); while (!list_empty(&priv->maps)) { map = list_entry(priv->maps.next, struct grant_map, next); list_del(&map->next); gntdev_put_map(NULL /* already removed */, map); } WARN_ON(!list_empty(&priv->freeable_maps)); + mutex_unlock(&priv->lock); if (use_ptemod) mmu_notifier_unregister(&priv->mn, priv->mm); -- cgit v0.10.2 From ae128293d97404f491dc76f1843c7adacfec3441 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 15 Jun 2015 17:25:16 +0900 Subject: dmaengine: pl330: Fix overflow when reporting residue in memcpy During memcpy operations the residue was always set to an u32 overflowed value. In pl330_tx_status() function number of currently transferred bytes was subtracted from internal "bytes_requested" field. However this "bytes_requested" was not initialized at start to length of memcpy buffer so transferred bytes were subtracted from 0 causing overflow. Signed-off-by: Krzysztof Kozlowski Cc: Fixes: aee4d1fac887 ("dmaengine: pl330: improve pl330_tx_status() function") Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77..c535cd0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.brst_len = 1; desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; desc->txd.flags = flags; -- cgit v0.10.2 From 5dd90e5b91e0f5c925b12b132c7cd27538870256 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 15 Jun 2015 23:00:09 +0900 Subject: dmaengine: pl330: Really fix choppy sound because of wrong residue calculation When pl330 driver was used during sound playback, after some time or after a number of plays the sound became choppy or totally noisy. For example on Odroid XU3 board the first four executions of aplay with small WAVE worked fine, but fifth was unrecognizable with errors: $ aplay /usr/share/sounds/alsa/Front_Right.wava underrun!!! (at least 0.095 ms long) Issue was caused by wrong residue reported by pl330 driver to pcm_dmaengine for its cyclic dma transfers. The pl330_tx_status(), residue reporting function, used a "last" flag in a descriptor to indicate that there is no more data to send. The pl330_tx_submit() iterated over descriptors trying to remove this flag from them and then mark last descriptor as "last". However when iterating it actually removed the flag not from descriptors but always from last of it (and then reset it). 
Thus effectively once some descriptor was marked as last, then it stayed like this forever causing residue to be reported too low. Signed-off-by: Krzysztof Kozlowski Fixes: aee4d1fac887 ("dmaengine: pl330: improve pl330_tx_status() function") Cc: Reported-by: gabriel@unseen.is Suggested-by: Marek Szyprowski Tested-by: Lars-Peter Clausen Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index c535cd0..ecab4ea0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } - last->last = false; + desc->last = false; dma_cookie_assign(&desc->txd); -- cgit v0.10.2 From c9ddbac9c89110f77cb0fa07e634aaf1194899aa Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 14 Jul 2015 18:27:46 -0500 Subject: PCI: Restore PCI_MSIX_FLAGS_BIRMASK definition 09a2c73ddfc7 ("PCI: Remove unused PCI_MSIX_FLAGS_BIRMASK definition") removed PCI_MSIX_FLAGS_BIRMASK from an exported header because it was unused in the kernel. But that breaks user programs that were using it (QEMU in particular). Restore the PCI_MSIX_FLAGS_BIRMASK definition. [bhelgaas: changelog] Signed-off-by: Michael S. Tsirkin Signed-off-by: Bjorn Helgaas CC: stable@vger.kernel.org # v3.13+ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index efe3443..413417f 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -319,6 +319,7 @@ #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ +#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ /* MSI-X Table entry format */ -- cgit v0.10.2 From 91a57731eed05714609855b1d72e6118b6b3ec6f Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 8 Jul 2015 17:34:43 +0300 Subject: ARM: DRA7: hwmod: fix gpmc hwmod GPMC smart idle is not really broken but it does not support smart idle with wakeup. 
Fixes: 556708fe8718 ("ARM: OMAP: DRA7: hwmod: Make gpmc software supervised as the smart idle is broken") Signed-off-by: Roger Quadros Reviewed-by: Paul Walmsley Signed-off-by: Paul Walmsley diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 2606c66..562247b 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = { .class = &dra7xx_gpmc_hwmod_class, .clkdm_name = "l3main1_clkdm", /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ - .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, + .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS, .main_clk = "l3_iclk_div", .prcm = { .omap4 = { -- cgit v0.10.2 From 653cdc13a340ad1cef29f1bab0d05d0771fa1d57 Mon Sep 17 00:00:00 2001 From: Reinhard Speyerer Date: Tue, 14 Jul 2015 22:55:06 +0200 Subject: USB: qcserial/option: make AT URCs work for Sierra Wireless MC7305/MC7355 Tests with a Sierra Wireless MC7355 have shown that 1199:9041 devices also require the option_send_setup() code to be used on the USB interface for the AT port to make unsolicited response codes work correctly. Move these devices from the qcserial driver to the option driver like it has been done for the 1199:68c0 devices in commit d80c0d14183516f184a5ac88e11008ee4c7d2a2e ("USB: qcserial/option: make AT URCs work for Sierra Wireless MC73xx"). 
Signed-off-by: Reinhard Speyerer Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 19b85ee..876423b 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff), + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 9c63897..552dc9a 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ - {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ -- cgit v0.10.2 From aeec6cdad6cda5fdf26f937c0d15f101539d8185 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 16 Jul 2015 16:56:14 +0530 Subject: ARC/time: Migrate to new 'set-state' interface Migrate arc driver to the new 'set-state' interface provided by clockevents core, the earlier 'set-mode' interface is marked obsolete now. This also enables us to implement callbacks for new states of clockevent devices, for example: ONESHOT_STOPPED. 
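For reference, the new interface replaces the single set_mode(mode, dev) callback with one int-returning callback per state. A minimal, hypothetical driver registering with the new callbacks would look roughly like this (all "example_*" names are illustrative only, not part of this patch):

#include <linux/clockchips.h>

static int example_set_periodic(struct clock_event_device *dev)
{
	/* program the hardware for HZ periodic interrupts */
	return 0;
}

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *dev)
{
	/* program a one-shot interrupt 'delta' cycles from now */
	return 0;
}

static struct clock_event_device example_clkevt = {
	.name			= "example-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_state_periodic	= example_set_periodic,
	.set_next_event		= example_set_next_event,
};

Callbacks for states a driver does not care about can simply be left unset, and the current state can be queried with helpers such as clockevent_state_periodic(dev) instead of reading dev->mode.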
Cc: Vineet Gupta Signed-off-by: Viresh Kumar Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 3364d2b..4294761 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta, return 0; } -static void arc_clkevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int arc_clkevent_set_periodic(struct clock_event_device *dev) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* - * At X Hz, 1 sec = 1000ms -> X cycles; - * 10ms -> X / 100 cycles - */ - arc_timer_event_setup(arc_get_core_freq() / HZ); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - default: - break; - } - - return; + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_get_core_freq() / HZ); + return 0; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { - .name = "ARC Timer0", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .mode = CLOCK_EVT_MODE_UNUSED, - .rating = 300, - .irq = TIMER0_IRQ, /* hardwired, no need for resources */ - .set_next_event = arc_clkevent_set_next_event, - .set_mode = arc_clkevent_set_mode, + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = TIMER0_IRQ, /* hardwired, no need for resources */ + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) @@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); - int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; + int irq_reenable = clockevent_state_periodic(evt); /* * Any write to CTRL reg ACks the interrupt, we rewrite the -- cgit v0.10.2 From d05a76ab4d9ece9bd987a8c29e3f384ec9f7b211 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Thu, 16 Jul 2015 21:45:38 +0300 Subject: ARCv2: add knob for DIV_REV in Kconfig Being highly configurable core ARC HS among other features might be configured with or without DIV_REM_OPTION (hardware divider). That option when enabled adds following instructions: div, divu, rem, remu. By default ARC HS38 has this option enabled. So we add here possibility to disable usage of hardware divider by compiler. Signed-off-by: Alexey Brodkin Signed-off-by: Vineet Gupta diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 91cf405..f7cfa7d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -379,6 +379,10 @@ config ARC_HAS_LL64 dest operands with 2 possible source operands. 
default y +config ARC_HAS_DIV_REM + bool "Insn: div, divu, rem, remu" + default y + config ARC_HAS_RTC bool "Local 64-bit r/o cycle counter" default n diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 46d8731..8a27a48 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape +ifdef CONFIG_ISA_ARCV2 + ifndef CONFIG_ARC_HAS_LL64 -cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 +cflags-y += -mno-ll64 +endif + +ifndef CONFIG_ARC_HAS_DIV_REM +cflags-y += -mno-div-rem +endif + endif cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables -- cgit v0.10.2 From 6da3700c98cdc8360f55c5510915efae1d66deea Mon Sep 17 00:00:00 2001 From: Pieter Hollants Date: Mon, 20 Jul 2015 11:56:17 +0200 Subject: USB: qcserial: Add support for Dell Wireless 5809e 4G Modem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added the USB IDs 0x413c:0x81b1 for the "Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card", a Dell-branded Sierra Wireless EM7305 LTE card in M.2 form factor, used eg. in Dell's Latitude E7540 Notebook series. "lsusb -v" output for this device: Bus 002 Device 003: ID 413c:81b1 Dell Computer Corp. Device Descriptor: bLength 18 bDescriptorType 1 bcdUSB 2.00 bDeviceClass 0 bDeviceSubClass 0 bDeviceProtocol 0 bMaxPacketSize0 64 idVendor 0x413c Dell Computer Corp. idProduct 0x81b1 bcdDevice 0.06 iManufacturer 1 Sierra Wireless, Incorporated iProduct 2 Dell Wireless 5809e Gobi™ 4G LTE Mobile Broadband Card iSerial 3 bNumConfigurations 2 Configuration Descriptor: bLength 9 bDescriptorType 2 wTotalLength 204 bNumInterfaces 4 bConfigurationValue 1 iConfiguration 0 bmAttributes 0xe0 Self Powered Remote Wakeup MaxPower 500mA Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 0 bAlternateSetting 0 bNumEndpoints 2 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x81 EP 1 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x01 EP 1 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 2 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 0 bInterfaceProtocol 0 iInterface 0 ** UNRECOGNIZED: 05 24 00 10 01 ** UNRECOGNIZED: 05 24 01 00 00 ** UNRECOGNIZED: 04 24 02 02 ** UNRECOGNIZED: 05 24 06 00 00 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x83 EP 3 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x000c 1x 12 bytes bInterval 9 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x82 EP 2 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x02 EP 2 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 3 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 0 
bInterfaceProtocol 0 iInterface 0 ** UNRECOGNIZED: 05 24 00 10 01 ** UNRECOGNIZED: 05 24 01 00 00 ** UNRECOGNIZED: 04 24 02 02 ** UNRECOGNIZED: 05 24 06 00 00 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x85 EP 5 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x000c 1x 12 bytes bInterval 9 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x84 EP 4 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x03 EP 3 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 8 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x87 EP 7 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x000a 1x 10 bytes bInterval 9 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x86 EP 6 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x04 EP 4 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 ** UNRECOGNIZED: 2c ff 42 49 53 54 00 01 07 f5 40 f6 00 00 00 00 01 f7 c4 09 02 f8 c4 09 03 f9 88 13 04 fa 10 27 05 fb 10 27 06 fc c4 09 07 fd c4 09 Configuration Descriptor: bLength 9 bDescriptorType 2 wTotalLength 95 bNumInterfaces 2 bConfigurationValue 2 iConfiguration 0 bmAttributes 0xe0 Self Powered Remote Wakeup MaxPower 500mA Interface Association: bLength 8 bDescriptorType 11 bFirstInterface 12 bInterfaceCount 2 bFunctionClass 2 Communications bFunctionSubClass 14 bFunctionProtocol 0 iFunction 0 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 12 bAlternateSetting 0 bNumEndpoints 1 bInterfaceClass 2 Communications bInterfaceSubClass 14 bInterfaceProtocol 0 iInterface 0 CDC Header: bcdCDC 1.10 CDC Union: bMasterInterface 12 bSlaveInterface 13 CDC MBIM: bcdMBIMVersion 1.00 wMaxControlMessage 4096 bNumberFilters 32 bMaxFilterSize 128 wMaxSegmentSize 1500 bmNetworkCapabilities 0x20 8-byte ntb input size CDC MBIM Extended: bcdMBIMExtendedVersion 1.00 bMaxOutstandingCommandMessages 64 wMTU 1500 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x82 EP 2 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0040 1x 64 bytes bInterval 9 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 13 bAlternateSetting 0 bNumEndpoints 0 bInterfaceClass 10 CDC Data bInterfaceSubClass 0 bInterfaceProtocol 2 iInterface 0 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 13 bAlternateSetting 1 bNumEndpoints 2 bInterfaceClass 10 CDC Data bInterfaceSubClass 0 bInterfaceProtocol 2 iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x81 EP 1 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x01 EP 1 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes 
bInterval 0 Device Qualifier (for other device speed): bLength 10 bDescriptorType 6 bcdUSB 2.00 bDeviceClass 0 bDeviceSubClass 0 bDeviceProtocol 0 bMaxPacketSize0 64 bNumConfigurations 2 Device Status: 0x0000 (Bus Powered) Signed-off-by: Pieter Hollants Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 552dc9a..d156545 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -157,6 +157,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ -- cgit v0.10.2 From 929423fa83e5b75e94101b280738b9a5a376a0e1 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 20 Jul 2015 13:49:39 +0200 Subject: xen: release lock occasionally during ballooning When dom0 is being ballooned balloon_process() will hold the balloon mutex until it is finished. This will block e.g. creation of new domains as the device backends for the new domain need some autoballooned pages for the ring buffers. Avoid this by releasing the balloon mutex from time to time during ballooning. Adjust the comment above balloon_process() regarding multiple instances of balloon_process(). Instead of open coding it, just use cond_resched(). Signed-off-by: Juergen Gross Signed-off-by: David Vrabel diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index fd93369..bf4a23c 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) } /* - * We avoid multiple worker processes conflicting via the balloon mutex. + * As this is a work item it is guaranteed to run as a single instance only. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. @@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work) enum bp_state state = BP_DONE; long credit; - mutex_lock(&balloon_mutex); do { + mutex_lock(&balloon_mutex); + credit = current_credit(); if (credit > 0) { @@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work) state = update_schedule(state); -#ifndef CONFIG_PREEMPT - if (need_resched()) - schedule(); -#endif + mutex_unlock(&balloon_mutex); + + cond_resched(); + } while (credit && state == BP_DONE); /* Schedule more work if there is some still to be done. */ if (state == BP_EAGAIN) schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); - - mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. 
*/ -- cgit v0.10.2 From 21481f2cfef9a79d3676916ef9424b1a7794776c Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 20 Jul 2015 17:19:17 +0300 Subject: ARCv2: lib: memcpy: Missing PREFETCHW Signed-off-by: Vineet Gupta diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index 1b2b3ac..0cab0b8 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -206,7 +206,7 @@ unalignedOffby3: ld.ab r6, [r1, 4] prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetch [r3, 32] ;Prefetch the next write location + prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 -- cgit v0.10.2 From 262137bca7cfbe690f8e904822b68f720998324a Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 20 Jul 2015 12:05:03 +0300 Subject: ARCv2: lib: memset: Don't assume 64-bit load/stores There are configurations which may not have LDD/STD Signed-off-by: Claudiu Zissulescu Signed-off-by: Alexey Brodkin Signed-off-by: Vineet Gupta diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 92d573c..365b183 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -10,12 +10,6 @@ #undef PREALLOC_NOT_AVAIL -#ifdef PREALLOC_NOT_AVAIL -#define PREWRITE(A,B) prefetchw [(A),(B)] -#else -#define PREWRITE(A,B) prealloc [(A),(B)] -#endif - ENTRY(memset) prefetchw [r0] ; Prefetch the write location mov.f 0, r2 @@ -51,9 +45,15 @@ ENTRY(memset) ;;; Convert len to Dwords, unfold x8 lsr.f lp_count, lp_count, 6 + lpnz @.Lset64bytes ;; LOOP START - PREWRITE(r3, 64) ;Prefetch the next write location +#ifdef PREALLOC_NOT_AVAIL + prefetchw [r3, 64] ;Prefetch the next write location +#else + prealloc [r3, 64] +#endif +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -62,16 +62,45 @@ ENTRY(memset) std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset64bytes: lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START prefetchw [r3, 32] ;Prefetch the next write location +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset32bytes: and.f lp_count, r2, 0x1F ;Last remaining 31 bytes -- cgit v0.10.2 From c2227a39a078473115910512aa0f8d53bd915e60 Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Tue, 7 Jul 2015 10:16:37 +0800 Subject: nfsd: Drop BUG_ON and ignore SECLABEL on absent filesystem On an absent filesystem (one served by another server), we need to be able to handle requests for certain attributest (like fs_locations, so the client can find out which server does have the filesystem), but others we can't. We forgot to take that into account when adding another attribute bitmask work for the SECURITY_LABEL attribute. There an export entry with the "refer" option can result in: [ 88.414272] kernel BUG at fs/nfsd/nfs4xdr.c:2249! 
[ 88.414828] invalid opcode: 0000 [#1] SMP [ 88.415368] Modules linked in: rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache nfsd xfs libcrc32c iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi iosf_mbi ppdev btrfs coretemp crct10dif_pclmul crc32_pclmul crc32c_intel xor ghash_clmulni_intel raid6_pq vmw_balloon parport_pc parport i2c_piix4 shpchp vmw_vmci acpi_cpufreq auth_rpcgss nfs_acl lockd grace sunrpc vmwgfx drm_kms_helper ttm drm mptspi mptscsih serio_raw mptbase e1000 scsi_transport_spi ata_generic pata_acpi [last unloaded: nfsd] [ 88.417827] CPU: 0 PID: 2116 Comm: nfsd Not tainted 4.0.7-300.fc22.x86_64 #1 [ 88.418448] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/20/2014 [ 88.419093] task: ffff880079146d50 ti: ffff8800785d8000 task.ti: ffff8800785d8000 [ 88.419729] RIP: 0010:[] [] nfsd4_encode_fattr+0x820/0x1f00 [nfsd] [ 88.420376] RSP: 0000:ffff8800785db998 EFLAGS: 00010206 [ 88.421027] RAX: 0000000000000001 RBX: 000000000018091a RCX: ffff88006668b980 [ 88.421676] RDX: 00000000fffef7fc RSI: 0000000000000000 RDI: ffff880078d05000 [ 88.422315] RBP: ffff8800785dbb58 R08: ffff880078d043f8 R09: ffff880078d4a000 [ 88.422968] R10: 0000000000010000 R11: 0000000000000002 R12: 0000000000b0a23a [ 88.423612] R13: ffff880078d05000 R14: ffff880078683100 R15: ffff88006668b980 [ 88.424295] FS: 0000000000000000(0000) GS:ffff88007c600000(0000) knlGS:0000000000000000 [ 88.424944] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 88.425597] CR2: 00007f40bc370f90 CR3: 0000000035af5000 CR4: 00000000001407f0 [ 88.426285] Stack: [ 88.426921] ffff8800785dbaa8 ffffffffa049e4af ffff8800785dba08 ffffffff813298f0 [ 88.427585] ffff880078683300 ffff8800769b0de8 0000089d00000001 0000000087f805e0 [ 88.428228] ffff880000000000 ffff880079434a00 0000000000000000 ffff88006668b980 [ 88.428877] Call Trace: [ 88.429527] [] ? exp_get_by_name+0x7f/0xb0 [nfsd] [ 88.430168] [] ? inode_doinit_with_dentry+0x210/0x6a0 [ 88.430807] [] ? d_lookup+0x2e/0x60 [ 88.431449] [] ? dput+0x33/0x230 [ 88.432097] [] ? mntput+0x24/0x40 [ 88.432719] [] ? path_put+0x22/0x30 [ 88.433340] [] ? nfsd_cross_mnt+0xb7/0x1c0 [nfsd] [ 88.433954] [] nfsd4_encode_dirent+0x1b0/0x3d0 [nfsd] [ 88.434601] [] ? nfsd4_encode_getattr+0x40/0x40 [nfsd] [ 88.435172] [] nfsd_readdir+0x1c1/0x2a0 [nfsd] [ 88.435710] [] ? nfsd_direct_splice_actor+0x20/0x20 [nfsd] [ 88.436447] [] nfsd4_encode_readdir+0x120/0x220 [nfsd] [ 88.437011] [] nfsd4_encode_operation+0x7d/0x190 [nfsd] [ 88.437566] [] nfsd4_proc_compound+0x24d/0x6f0 [nfsd] [ 88.438157] [] nfsd_dispatch+0xc3/0x220 [nfsd] [ 88.438680] [] svc_process_common+0x43b/0x690 [sunrpc] [ 88.439192] [] svc_process+0x103/0x1b0 [sunrpc] [ 88.439694] [] nfsd+0x117/0x190 [nfsd] [ 88.440194] [] ? nfsd_destroy+0x90/0x90 [nfsd] [ 88.440697] [] kthread+0xd8/0xf0 [ 88.441260] [] ? kthread_worker_fn+0x180/0x180 [ 88.441762] [] ret_from_fork+0x58/0x90 [ 88.442322] [] ? kthread_worker_fn+0x180/0x180 [ 88.442879] Code: 0f 84 93 05 00 00 83 f8 ea c7 85 a0 fe ff ff 00 00 27 30 0f 84 ba fe ff ff 85 c0 0f 85 a5 fe ff ff e9 e3 f9 ff ff 0f 1f 44 00 00 <0f> 0b 66 0f 1f 44 00 00 be 04 00 00 00 4c 89 ef 4c 89 8d 68 fe [ 88.444052] RIP [] nfsd4_encode_fattr+0x820/0x1f00 [nfsd] [ 88.444658] RSP [ 88.445232] ---[ end trace 6cb9d0487d94a29f ]--- Signed-off-by: Kinglong Mee Cc: stable@vger.kernel.org Signed-off-by: J. 
Bruce Fields diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 5463385..75e0563 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ FATTR4_WORD0_RDATTR_ERROR) #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID +#define WORD2_ABSENT_FS_ATTRS 0 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL static inline __be32 @@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, { return 0; } #endif -static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) +static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err) { /* As per referral draft: */ if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || @@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) } *bmval0 &= WORD0_ABSENT_FS_ATTRS; *bmval1 &= WORD1_ABSENT_FS_ATTRS; + *bmval2 &= WORD2_ABSENT_FS_ATTRS; return 0; } @@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); if (exp->ex_fslocs.migrated) { - BUG_ON(bmval[2]); - status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err); + status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err); if (status) goto out; } @@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, } #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || - bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { + if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) || + bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { err = security_inode_getsecctx(d_inode(dentry), &context, &contextlen); contextsupport = (err == 0); -- cgit v0.10.2 From 1ca4b88e7de23f6f86d2009101fe42d5b9dbf3de Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Thu, 9 Jul 2015 17:38:26 +0800 Subject: nfsd: Fix a file leak on nfsd4_layout_setlease failure If nfsd4_layout_setlease fails, nfsd will not put ls->ls_file. Fix commit c5c707f96f "nfsd: implement pNFS layout recalls". Signed-off-by: Kinglong Mee Signed-off-by: J. Bruce Fields diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 6904213..ebf90e4 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, BUG_ON(!ls->ls_file); if (nfsd4_layout_setlease(ls)) { + fput(ls->ls_file); put_nfs4_file(fp); kmem_cache_free(nfs4_layout_stateid_cache, ls); return NULL; -- cgit v0.10.2 From 1342ff45bd4a43d4ef058aa419399437461f3c88 Mon Sep 17 00:00:00 2001 From: Beata Michalska Date: Mon, 15 Jun 2015 10:12:46 +0200 Subject: ARM: dts: Update video-phy node with syscon phandle for exynos3250 As a follow-up to recent changes to Exynos mipi video phy driver, introducing support for PMU regmap in commit e4b3d38088df ("phy: exynos-video-mipi: Fix regression by adding support for PMU regmap") add a syscon phandle to video-phy node to bring back to life both MIPI DSI display and MIPI CSIS-2 camera sensor on Exynos3250. 
Signed-off-by: Beata Michalska Reviewed-by: Javier Martinez Canillas Signed-off-by: Krzysztof Kozlowski Signed-off-by: Kukjin Kim diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index d720133..2db9943 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -138,8 +138,8 @@ mipi_phy: video-phy@10020710 { compatible = "samsung,s5pv210-mipi-video-phy"; - reg = <0x10020710 8>; #phy-cells = <1>; + syscon = <&pmu_system_controller>; }; pd_cam: cam-power-domain@10023C00 { -- cgit v0.10.2 From 300bde79f3d86b1af6fcde7b7b2439ecffccbff4 Mon Sep 17 00:00:00 2001 From: Thomas Abraham Date: Fri, 3 Apr 2015 18:43:47 +0200 Subject: ARM: dts: add CPU OPP and regulator supply property for exynos4210 For Exynos4210 platforms, add CPU operating points and CPU regulator supply properties for migrating from Exynos specific cpufreq driver to using generic cpufreq driver. Cc: Doug Anderson Cc: Andreas Faerber Cc: Sachin Kamat Cc: Andreas Farber Cc: Javier Martinez Canillas Signed-off-by: Thomas Abraham [b.zolnierkie: removed exynos5250 and exynos5420 support for now] Signed-off-by: Bartlomiej Zolnierkiewicz [k.kozlowski: Rebased, moved cpu nodes to alphabetical position] Signed-off-by: Krzysztof Kozlowski Signed-off-by: Kukjin Kim diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index e0abfc3..e050d85 100644 --- a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -127,6 +127,10 @@ }; }; +&cpu0 { + cpu0-supply = <&buck1_reg>; +}; + &fimd { pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 98f3ce6..ba34886 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -188,6 +188,10 @@ }; }; +&cpu0 { + cpu0-supply = <&varm_breg>; +}; + &dsi_0 { vddcore-supply = <&vusb_reg>; vddio-supply = <&vmipi_reg>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index d4f2b11..775892b 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -548,6 +548,10 @@ }; }; +&cpu0 { + cpu0-supply = <&vdd_arm_reg>; +}; + &pinctrl_1 { hdmi_hpd: hdmi-hpd { samsung,pins = "gpx3-7"; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 10d3c17..3e5ba66 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -40,6 +40,18 @@ device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + clocks = <&clock CLK_ARM_CLK>; + clock-names = "cpu"; + clock-latency = <160000>; + + operating-points = < + 1200000 1250000 + 1000000 1150000 + 800000 1075000 + 500000 975000 + 400000 975000 + 200000 950000 + >; cooling-min-level = <4>; cooling-max-level = <2>; #cooling-cells = <2>; /* min followed by max */ -- cgit v0.10.2 From 2f01a33bd26545c16fea7592697f7f15c416402b Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Tue, 21 Jul 2015 09:51:29 +0800 Subject: usb: chipidea: ehci_init_driver is intended to call one time The ehci_init_driver is used to initialize hcd APIs for each ehci controller driver, it is designed to be called only one time and before driver register is called. The current design will cause ehci_init_driver is called multiple times at probe process, it will cause hc_driver's initialization affect current running hcd. 
We run out NULL pointer dereference problem when one hcd is started by module_init, and the other is started by otg thread at SMP platform. The reason for this problem is ehci_init_driver will do memory copy for current uniform hc_driver, and this memory copy will do memset (as 0) first, so when the first hcd is running usb_add_hcd, and the second hcd may clear the uniform hc_driver's space (at ehci_init_driver), then the first hcd will meet NULL pointer at the same time. See below two logs: LOG_1: ci_hdrc ci_hdrc.0: EHCI Host Controller ci_hdrc ci_hdrc.0: new USB bus registered, assigned bus number 1 ci_hdrc ci_hdrc.1: doesn't support gadget Unable to handle kernel NULL pointer dereference at virtual address 00000014 pgd = 80004000 [00000014] *pgd=00000000 Internal error: Oops: 805 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 108 Comm: kworker/u8:2 Not tainted 3.14.38-222193-g24b2734-dirty #25 Workqueue: ci_otg ci_otg_work task: d839ec00 ti: d8400000 task.ti: d8400000 PC is at ehci_run+0x4c/0x284 LR is at _raw_spin_unlock_irqrestore+0x28/0x54 pc : [<8041f9a0>] lr : [<8070ea84>] psr: 60000113 sp : d8401e30 ip : 00000000 fp : d8004400 r10: 00000001 r9 : 00000001 r8 : 00000000 r7 : 00000000 r6 : d8419940 r5 : 80dd24c0 r4 : d8419800 r3 : 8001d060 r2 : 00000000 r1 : 00000001 r0 : 00000000 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c53c7d Table: 1000404a DAC: 00000015 Process kworker/u8:2 (pid: 108, stack limit = 0xd8400238) Stack: (0xd8401e30 to 0xd8402000) 1e20: d87523c0 d8401e48 66667562 d8419800 1e40: 00000000 00000000 d8419800 00000000 00000000 00000000 d84198b0 8040fcdc 1e60: 00000000 80dd320c d8477610 d8419c00 d803d010 d8419800 00000000 00000000 1e80: d8004400 00000000 d8400008 80431494 80431374 d803d100 d803d010 d803d1ac 1ea0: 00000000 80432428 804323d4 d803d100 00000001 80435eb8 80e0d0bc d803d100 1ec0: 00000006 80436458 00000000 d803d100 80e92ec8 80436f44 d803d010 d803d100 1ee0: d83fde00 8043292c d8752710 d803d1f4 d803d010 8042ddfc 8042ddb8 d83f3b00 1f00: d803d1f4 80042b60 00000000 00000003 00000001 00000001 80054598 d83f3b00 1f20: d8004400 d83f3b18 d8004414 d8400000 80e3957b 00000089 d8004400 80043814 1f40: d839ec00 00000000 d83fcd80 d83f3b00 800436e4 00000000 00000000 00000000 1f60: 00000000 80048f34 00000000 00000000 00000000 d83f3b00 00000000 00000000 1f80: d8401f80 d8401f80 00000000 00000000 d8401f90 d8401f90 d8401fac d83fcd80 1fa0: 80048e68 00000000 00000000 8000e538 00000000 00000000 00000000 00000000 1fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 1fe0: 00000000 00000000 00000000 00000000 00000013 00000000 00000000 00000000 [<8041f9a0>] (ehci_run) from [<8040fcdc>] (usb_add_hcd+0x248/0x6e8) [<8040fcdc>] (usb_add_hcd) from [<80431494>] (host_start+0x120/0x2e4) [<80431494>] (host_start) from [<80432428>] (ci_otg_start_host+0x54/0xbc) [<80432428>] (ci_otg_start_host) from [<80435eb8>] (otg_set_protocol+0xa4/0xd0) [<80435eb8>] (otg_set_protocol) from [<80436458>] (otg_set_state+0x574/0xc58) [<80436458>] (otg_set_state) from [<80436f44>] (otg_statemachine+0x408/0x46c) [<80436f44>] (otg_statemachine) from [<8043292c>] (ci_otg_fsm_work+0x3c/0x190) [<8043292c>] (ci_otg_fsm_work) from [<8042ddfc>] (ci_otg_work+0x44/0x1c4) [<8042ddfc>] (ci_otg_work) from [<80042b60>] (process_one_work+0xf4/0x35c) [<80042b60>] (process_one_work) from [<80043814>] (worker_thread+0x130/0x3bc) [<80043814>] (worker_thread) from [<80048f34>] (kthread+0xcc/0xe4) [<80048f34>] (kthread) from [<8000e538>] (ret_from_fork+0x14/0x3c) Code: e5953018 
e3530000 0a000000 e12fff33 (e5878014) LOG_2: ci_hdrc ci_hdrc.0: EHCI Host Controller ci_hdrc ci_hdrc.0: new USB bus registered, assigned bus number 1 ci_hdrc ci_hdrc.1: doesn't support gadget Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = 80004000 [00000000] *pgd=00000000 In Online 00:00ternal e Offline rror: Oops: 80000005 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 108 Comm: kworker/u8:2 Not tainted 3.14.38-02007-g24b2734-dirty #127 Workque Online 00:00ue: ci_o Offline tg ci_otg_work Online 00:00task: d8 Offline 39ec00 ti: d83ea000 task.ti: d83ea000 PC is at 0x0 LR is at usb_add_hcd+0x248/0x6e8 pc : [<00000000>] lr : [<8040f644>] psr: 60000113 sp : d83ebe60 ip : 00000000 fp : d8004400 r10: 00000001 r9 : 00000001 r8 : d85fd4b0 r7 : 00000000 r6 : 00000000 r5 : 00000000 r4 : d85fd400 r3 : 00000000 r2 : d85fd4f4 r1 : 80410178 r0 : d85fd400 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c53c7d Table: 1000404a DAC: 00000015 Process kworker/u8:2 (pid: 108, stack limit = 0xd83ea238) Stack: (0xd83ebe60 to 0xd83ec000) be60: 00000000 80dd920c d8654e10 d85fd800 d803e010 d85fd400 00000000 00000000 be80: d8004400 00000000 d83ea008 80430e34 80430d14 d803e100 d803e010 d803e1ac bea0: 00000000 80431dc8 80431d74 d803e100 00000001 80435858 80e130bc d803e100 bec0: 00000006 80435df8 00000000 d803e100 80e98ec8 804368e4 d803e010 d803e100 bee0: d86e8100 804322cc d86cf050 d803e1f4 d803e010 8042d79c 8042d758 d83cf900 bf00: d803e1f4 80042b78 00000000 00000003 00000001 00000001 800545e8 d83cf900 bf20: d8004400 d83cf918 d8004414 d83ea000 80e3f57b 00000089 d8004400 8004382c bf40: d839ec00 00000000 d8393780 d83cf900 800436fc 00000000 00000000 00000000 bf60: 00000000 80048f50 80e019f4 00000000 0000264c d83cf900 00000000 00000000 bf80: d83ebf80 d83ebf80 00000000 00000000 d83ebf90 d83ebf90 d83ebfac d8393780 bfa0: 80048e84 00000000 00000000 8000e538 00000000 00000000 00000000 00000000 bfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 bfe0: 00000000 00000000 00000000 00000000 00000013 00000000 ee66e85d 133ebd03 [<804 Online 00:000f644>] Offline (usb_add_hcd) from [<80430e34>] (host_start+0x120/0x2e4) [<80430e34>] (host_start) from [<80431dc8>] (ci_otg_start_host+0x54/0xbc) [<80431dc8>] (ci_otg_start_host) from [<80435858>] (otg_set_protocol+0xa4/0xd0) [<80435858>] (otg_set_protocol) from [<80435df8>] (otg_set_state+0x574/0xc58) [<80435df8>] (otg_set_state) from [<804368e4>] (otg_statemachine+0x408/0x46c) [<804368e4>] (otg_statemachine) from [<804322cc>] (ci_otg_fsm_work+0x3c/0x190) [<804322cc>] (ci_otg_fsm_work) from [<8042d79c>] (ci_otg_work+0x44/0x1c4) [<8042d79c>] (ci_otg_work) from [<80042b78>] (process_one_work+0xf4/0x35c) [<80042b78>] (process_one_work) from [<8004382c>] (worker_thread+0x130/0x3bc) [<8004382c>] (worker_thread) from [<80048f50>] (kthread+0xcc/0xe4) [<80048f50>] (kthread) from [<8000e538>] (ret_from_fork+0x14/0x3c) Code: bad PC value Cc: Jun Li Cc: Cc: Alan Stern Acked-by: Alan Stern Signed-off-by: Peter Chen diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 74fea4f..3ad48e1 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = { }, }; -module_platform_driver(ci_hdrc_driver); +static int __init ci_hdrc_platform_register(void) +{ + ci_hdrc_host_driver_init(); + return platform_driver_register(&ci_hdrc_driver); +} +module_init(ci_hdrc_platform_register); + +static void __exit 
ci_hdrc_platform_unregister(void) +{ + platform_driver_unregister(&ci_hdrc_driver); +} +module_exit(ci_hdrc_platform_unregister); MODULE_ALIAS("platform:ci_hdrc"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 6cf87b8..7161439 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci) rdrv->name = "host"; ci->roles[CI_ROLE_HOST] = rdrv; + return 0; +} + +void ci_hdrc_host_driver_init(void) +{ ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides); orig_bus_suspend = ci_ehci_hc_driver.bus_suspend; ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend; - - return 0; } diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h index 5707bf3..0f12f13 100644 --- a/drivers/usb/chipidea/host.h +++ b/drivers/usb/chipidea/host.h @@ -5,6 +5,7 @@ int ci_hdrc_host_init(struct ci_hdrc *ci); void ci_hdrc_host_destroy(struct ci_hdrc *ci); +void ci_hdrc_host_driver_init(void); #else @@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci) } +static void ci_hdrc_host_driver_init(void) +{ + +} + #endif #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */ -- cgit v0.10.2 From 6dd3f13e4239a8c2b1e60687d7321bc0df614f33 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Thu, 16 Jul 2015 18:23:53 +0200 Subject: kbuild: Do not pick up ARCH_{CPP,A,C}FLAGS from the environment Initialize the ARCH_* overrides before including the arch Makefile, to avoid picking up the values from the environment. The variables can still be overriden on the make command line, but this won't happen by accident. Signed-off-by: Michal Marek diff --git a/Makefile b/Makefile index 13270c0..dc868bc 100644 --- a/Makefile +++ b/Makefile @@ -597,6 +597,11 @@ endif # $(dot-config) # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default +# values of the respective KBUILD_* variables +ARCH_CPPFLAGS := +ARCH_AFLAGS := +ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) -- cgit v0.10.2 From 3d1450d54a4fc277fc4598acf2335f74b66b08fc Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 7 Jul 2015 20:26:07 +0200 Subject: Makefile: Force gzip and xz on module install Running `make modules_install` ordinarily will overwrite existing modules. This is the desired behavior, and is how pretty much every other `make install` target works. However, if CONFIG_MODULE_COMPRESS is enabled, modules are passed through gzip and xz which then do the file writing. Both gzip and xz will error out if the file already exists, unless -f is passed. This patch adds -f so that the behavior is uniform. Signed-off-by: Jason A. 
Donenfeld Signed-off-by: Michal Marek diff --git a/Makefile b/Makefile index dc868bc..c5234fe 100644 --- a/Makefile +++ b/Makefile @@ -852,10 +852,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n + mod_compress_cmd = gzip -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz + mod_compress_cmd = xz -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd -- cgit v0.10.2 From 450ed0db011d200a691591a75fdbcd7cae1a9b4a Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Thu, 16 Jul 2015 21:45:17 +0300 Subject: ARCv2: allow selection of page size for MMUv4 MMUv4 also supports the configurable page size as MMUv3. Signed-off-by: Alexey Brodkin Signed-off-by: Vineet Gupta diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index f7cfa7d..a5fccdf 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K config ARC_PAGE_SIZE_16K bool "16KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 config ARC_PAGE_SIZE_4K bool "4KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 endchoice -- cgit v0.10.2 From 3ba3a73e9f204ce7904cae7d14be399b467c4fef Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 20 Jul 2015 20:45:51 +1000 Subject: powerpc/powernv/ioda2: Fix calculation for memory allocated for TCE table The existing code stores the amount of memory allocated for a TCE table. At the moment it uses @offset which is a virtual offset in the TCE table which is only correct for a one level tables and it does not include memory allocated for intermediate levels. When multilevel TCE table is requested, WARN_ON in tce_iommu_create_table() prints a warning. This adds an additional counter to pnv_pci_ioda2_table_do_alloc_pages() to count actually allocated memory. 
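To see why the two counters differ, consider a simplified model of a recursive multi-level allocator (a sketch to illustrate the accounting only, not the pnv_pci_ioda2 code itself; alloc_level() and its arguments are made up):

#include <linux/slab.h>

static void *alloc_level(unsigned int levels, unsigned long page_size,
			 unsigned long *offset, unsigned long *total)
{
	void *page = kzalloc(page_size, GFP_KERNEL);

	if (!page)
		return NULL;
	*total += page_size;		/* real memory, counted at every level */
	if (levels == 1)
		*offset += page_size;	/* virtual offset advances at the leaf level only */
	/* else: recurse into the lower levels, each call adding to *total */
	return page;
}

Reporting *offset as the allocated size is correct for a single-level table, but it undercounts a multi-level one by all of the intermediate-level pages, which is what the warning flagged.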
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5738d31..85cbc96 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb) static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, unsigned levels, unsigned long limit, - unsigned long *current_offset) + unsigned long *current_offset, unsigned long *total_allocated) { struct page *tce_mem = NULL; __be64 *addr, *tmp; @@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, } addr = page_address(tce_mem); memset(addr, 0, allocated); + *total_allocated += allocated; --levels; if (!levels) { @@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, for (i = 0; i < entries; ++i) { tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, - levels, limit, current_offset); + levels, limit, current_offset, total_allocated); if (!tmp) break; @@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, struct iommu_table *tbl) { void *addr; - unsigned long offset = 0, level_shift; + unsigned long offset = 0, level_shift, total_allocated = 0; const unsigned window_shift = ilog2(window_size); unsigned entries_shift = window_shift - page_shift; unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); @@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, tce_table_size, &offset); + levels, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = offset; + tbl->it_allocated_size = total_allocated; pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", window_size, tce_table_size, bus_offset); -- cgit v0.10.2 From 120d200a86273d1840d091959dca65617cd9eff2 Mon Sep 17 00:00:00 2001 From: Luis Henriques Date: Fri, 17 Jul 2015 14:20:31 +0100 Subject: macintosh/ans-lcd: fix build failure after module_init/exit relocation After commit 0fd972a7d91d ("module: relocate module_init from init.h to module.h") ans-lcd module fails to build with: drivers/macintosh/ans-lcd.c:201:1: warning: data definition has no type or storage class [enabled by default] module_init(anslcd_init); ^ drivers/macintosh/ans-lcd.c:201:1: error: type defaults to 'int' in declaration of 'module_init' [-Werror=implicit-int] drivers/macintosh/ans-lcd.c:201:1: warning: parameter names (without types) in function declaration [enabled by default] drivers/macintosh/ans-lcd.c:202:1: warning: data definition has no type or storage class [enabled by default] module_exit(anslcd_exit); ^ drivers/macintosh/ans-lcd.c:202:1: error: type defaults to 'int' in declaration of 'module_exit' [-Werror=implicit-int] drivers/macintosh/ans-lcd.c:202:1: warning: parameter names (without types) in function declaration [enabled by default] drivers/macintosh/ans-lcd.c:155:1: warning: 'anslcd_init' defined but not used [-Wunused-function] anslcd_init(void) ^ drivers/macintosh/ans-lcd.c:195:1: warning: 
'anslcd_exit' defined but not used [-Wunused-function] anslcd_exit(void) ^ This commit fixes it by replacing linux/init.h by linux/module.h. Fixes: 0fd972a7d91d ("module: relocate module_init from init.h to module.h") Signed-off-by: Luis Henriques Acked-by: Paul Gortmaker Signed-off-by: Michael Ellerman diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 1a57e88..cd35079 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include -- cgit v0.10.2 From 6f043b50da8e03bdcc5703fd37ea45bc6892432f Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 21 Jul 2015 22:07:47 -0700 Subject: crypto: qat - Fix invalid synchronization between register/unregister sym algs The synchronization method used atomic was bogus. Use a proper synchronization with mutex. Cc: stable@vger.kernel.org Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 067402c..df427c0 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -73,7 +73,8 @@ ICP_QAT_HW_CIPHER_KEY_CONVERT, \ ICP_QAT_HW_CIPHER_DECRYPT) -static atomic_t active_dev; +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; struct qat_alg_buf { uint32_t len; @@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { { int qat_algs_register(void) { - if (atomic_add_return(1, &active_dev) == 1) { + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs == 1) { int i; for (i = 0; i < ARRAY_SIZE(qat_algs); i++) @@ -1289,21 +1293,25 @@ int qat_algs_register(void) CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; - return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); } - return 0; + mutex_unlock(&algs_lock); + return ret; } int qat_algs_unregister(void) { - if (atomic_sub_return(1, &active_dev) == 0) - return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); - return 0; + int ret = 0; + + mutex_lock(&algs_lock); + if (--active_devs == 0) + ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + mutex_unlock(&algs_lock); + return ret; } int qat_algs_init(void) { - atomic_set(&active_dev, 0); crypto_get_default_rng(); return 0; } -- cgit v0.10.2 From f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2015 18:05:35 +0800 Subject: crypto: ixp4xx - Remove bogus BUG_ON on scattered dst buffer This patch removes a bogus BUG_ON in the ablkcipher path that triggers when the destination buffer is different from the source buffer and is scattered. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 7ba495f..402631a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) crypt->mode |= NPE_OP_NOT_IN_PLACE; /* This was never tested by Intel * for more than one dst buffer, I think. 
*/ - BUG_ON(req->dst->length < nbytes); req_ctx->dst = NULL; if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, flags, DMA_FROM_DEVICE)) -- cgit v0.10.2 From 9a258afa928b45e6dd2efcac46ccf7eea705d35a Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 16 Jul 2015 16:16:44 +0300 Subject: ARM: OMAP2+: hwmod: Fix _wait_target_ready() for hwmods without sysc For hwmods without sysc, _init_mpu_rt_base(oh) won't be called and so _find_mpu_rt_port(oh) will return NULL thus preventing ready state check on those modules after the module is enabled. This can potentially cause a bus access error if the module is accessed before the module is ready. Fix this by unconditionally calling _init_mpu_rt_base() during hwmod _init(). Do ioremap only if we need SYSC access. Eventhough _wait_target_ready() check doesn't really need MPU RT port but just the PRCM registers, we still mandate that the hwmod must have an MPU RT port if ready state check needs to be done. Else it would mean that the module is not accessible by MPU so there is no point in waiting for target to be ready. e.g. this fixes the below DCAN bus access error on AM437x-gp-evm. [ 16.672978] ------------[ cut here ]------------ [ 16.677885] WARNING: CPU: 0 PID: 1580 at drivers/bus/omap_l3_noc.c:147 l3_interrupt_handler+0x234/0x35c() [ 16.687946] 44000000.ocp:L3 Custom Error: MASTER M2 (64-bit) TARGET L4_PER_0 (Read): Data Access in User mode during Functional access [ 16.700654] Modules linked in: xhci_hcd btwilink ti_vpfe dwc3 videobuf2_core ov2659 bluetooth v4l2_common videodev ti_am335x_adc kfifo_buf industrialio c_can_platform videobuf2_dma_contig media snd_soc_tlv320aic3x pixcir_i2c_ts c_can dc [ 16.731144] CPU: 0 PID: 1580 Comm: rpc.statd Not tainted 3.14.26-02561-gf733aa036398 #180 [ 16.739747] Backtrace: [ 16.742336] [] (dump_backtrace) from [] (show_stack+0x18/0x1c) [ 16.750285] r6:00000093 r5:00000009 r4:eab5b8a8 r3:00000000 [ 16.756252] [] (show_stack) from [] (dump_stack+0x20/0x28) [ 16.763870] [] (dump_stack) from [] (warn_slowpath_common+0x6c/0x8c) [ 16.772408] [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x38/0x40) [ 16.781550] r8:c05d1f90 r7:c0730844 r6:c0730448 r5:80080003 r4:ed0cd210 [ 16.788626] [] (warn_slowpath_fmt) from [] (l3_interrupt_handler+0x234/0x35c) [ 16.797968] r3:ed0cd480 r2:c0730508 [ 16.801747] [] (l3_interrupt_handler) from [] (handle_irq_event_percpu+0x54/0x1bc) [ 16.811533] r10:ed005600 r9:c084855b r8:0000002a r7:00000000 r6:00000000 r5:0000002a [ 16.819780] r4:ed0e6d80 [ 16.822453] [] (handle_irq_event_percpu) from [] (handle_irq_event+0x30/0x40) [ 16.831789] r10:eb2b6938 r9:eb2b6960 r8:bf011420 r7:fa240100 r6:00000000 r5:0000002a [ 16.840052] r4:ed005600 [ 16.842744] [] (handle_irq_event) from [] (handle_fasteoi_irq+0x74/0x128) [ 16.851702] r4:ed005600 r3:00000000 [ 16.855479] [] (handle_fasteoi_irq) from [] (generic_handle_irq+0x28/0x38) [ 16.864523] r4:0000002a r3:c0066164 [ 16.868294] [] (generic_handle_irq) from [] (handle_IRQ+0x38/0x8c) [ 16.876612] r4:c081c640 r3:00000202 [ 16.880380] [] (handle_IRQ) from [] (gic_handle_irq+0x30/0x5c) [ 16.888328] r6:eab5ba38 r5:c0804460 r4:fa24010c r3:00000100 [ 16.894303] [] (gic_handle_irq) from [] (__irq_svc+0x40/0x50) [ 16.902193] Exception stack(0xeab5ba38 to 0xeab5ba80) [ 16.907499] ba20: 00000000 00000006 [ 16.916108] ba40: fa1d0000 fa1d0008 ed3d3000 eab5bab4 ed3d3460 c0842af4 bf011420 eb2b6960 [ 16.924716] ba60: eb2b6938 eab5ba8c eab5ba90 eab5ba80 bf035220 bf07702c 600f0013 ffffffff [ 16.933317] r7:eab5ba6c r6:ffffffff r5:600f0013 r4:bf07702c [ 
16.939317] [] (c_can_plat_read_reg_aligned_to_16bit [c_can_platform]) from [] (c_can_get_berr_counter+0x38/0x64 [c_can]) [ 16.952696] [] (c_can_get_berr_counter [c_can]) from [] (can_fill_info+0x124/0x15c [can_dev]) [ 16.963480] r5:ec8c9740 r4:ed3d3000 [ 16.967253] [] (can_fill_info [can_dev]) from [] (rtnl_fill_ifinfo+0x58c/0x8fc) [ 16.976749] r6:ec8c9740 r5:ed3d3000 r4:eb2b6780 [ 16.981613] [] (rtnl_fill_ifinfo) from [] (rtnl_dump_ifinfo+0xf0/0x1dc) [ 16.990401] r10:ec8c9740 r9:00000000 r8:00000000 r7:00000000 r6:ebd4d1b4 r5:ed3d3000 [ 16.998671] r4:00000000 [ 17.001342] [] (rtnl_dump_ifinfo) from [] (netlink_dump+0xa8/0x1e0) [ 17.009772] r10:00000000 r9:00000000 r8:c0503318 r7:ebf3e6c0 r6:ebd4d1b4 r5:ec8c9740 [ 17.018050] r4:ebd4d000 [ 17.020714] [] (netlink_dump) from [] (__netlink_dump_start+0x104/0x154) [ 17.029591] r6:eab5bd34 r5:ec8c9980 r4:ebd4d000 [ 17.034454] [] (__netlink_dump_start) from [] (rtnetlink_rcv_msg+0x110/0x1f4) [ 17.043778] r7:00000000 r6:ec8c9980 r5:00000f40 r4:ebf3e6c0 [ 17.049743] [] (rtnetlink_rcv_msg) from [] (netlink_rcv_skb+0xb4/0xc8) [ 17.058449] r8:eab5bdac r7:ec8c9980 r6:c05054f4 r5:ec8c9980 r4:ebf3e6c0 [ 17.065534] [] (netlink_rcv_skb) from [] (rtnetlink_rcv+0x24/0x2c) [ 17.073854] r6:ebd4d000 r5:00000014 r4:ec8c9980 r3:c0504110 [ 17.079846] [] (rtnetlink_rcv) from [] (netlink_unicast+0x180/0x1ec) [ 17.088363] r4:ed0c6800 r3:c0504110 [ 17.092113] [] (netlink_unicast) from [] (netlink_sendmsg+0x2ac/0x380) [ 17.100813] r10:00000000 r8:00000008 r7:ec8c9980 r6:ebd4d000 r5:eab5be70 r4:eab5bee4 [ 17.109083] [] (netlink_sendmsg) from [] (sock_sendmsg+0x90/0xb0) [ 17.117305] r10:00000000 r9:eab5a000 r8:becdda3c r7:0000000c r6:ea978400 r5:eab5be70 [ 17.125563] r4:c05103c4 [ 17.128225] [] (sock_sendmsg) from [] (SyS_sendto+0xb8/0xdc) [ 17.136001] r6:becdda5c r5:00000014 r4:ecd37040 [ 17.140876] [] (SyS_sendto) from [] (ret_fast_syscall+0x0/0x30) [ 17.148923] r10:00000000 r8:c000e804 r7:00000122 r6:becdda5c r5:0000000c r4:becdda5c [ 17.157169] ---[ end trace 2b71e15b38f58bad ]--- Fixes: 6423d6df1440 ("ARM: OMAP2+: hwmod: check for module address space during init") Signed-off-by: Roger Quadros Signed-off-by: Paul Walmsley Cc: diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index d78c12e..486cc4d 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np, * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * + * If SYSC access is not needed, the registers will not be remapped + * and non-availability of MPU access is not treated as an error. + * * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and * -ENXIO on absent or invalid register target address space. 
*/ @@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, _save_mpu_port_index(oh); + /* if we don't need sysc access we don't need to ioremap */ + if (!oh->class->sysc) + return 0; + + /* we can't continue without MPU PORT if we need sysc access */ if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return -ENXIO; @@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, oh->name); /* Extract the IO space from device tree blob */ - if (!np) + if (!np) { + pr_err("omap_hwmod: %s: no dt node\n", oh->name); return -ENXIO; + } va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { @@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->name, np->name); } - if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, index, np); - if (r < 0) { - WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", - oh->name); - return 0; - } + r = _init_mpu_rt_base(oh, NULL, index, np); + if (r < 0) { + WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", + oh->name); + return 0; } r = _init_clocks(oh, NULL); -- cgit v0.10.2 From 111509294b9efafe0353423c8180e03db810bdb5 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Sun, 14 Jun 2015 15:41:49 +0100 Subject: mfd: arizona: Fix race between runtime suspend and IRQs The function arizona_irq_thread (the threaded handler for the arizona IRQs) calls pm_runtime_get_sync at the start to ensure that the chip is active as we handle the IRQ. If the chip is part way through a runtime suspend when an IRQ arrives the PM core will wait for the suspend to complete, before resuming. However, since commit 4f0216409f7c ("mfd: arizona: Add better support for system suspend") the runtime suspend function may call disable_irq, if the chip is going to fully power off, which will try to wait for any outstanding IRQs to complete. This results in deadlock as the IRQ thread is waiting for the PM operation to complete and the PM thread is waiting for the IRQ to complete. To avoid this situation we use disable_irq_nosync, which allows the suspending thread to finish the suspend without waiting for the IRQ to complete. This is safe because if an IRQ is being processed it can only be blocked at the pm_runtime_get_sync at the start of the handler otherwise it wouldn't be possible to suspend. Signed-off-by: Charles Keepax Signed-off-by: Lee Jones diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bebf58a..e60bcd9 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev) arizona->has_fully_powered_off = true; - disable_irq(arizona->irq); + disable_irq_nosync(arizona->irq); arizona_enable_reset(arizona); regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); -- cgit v0.10.2 From 72e43164fd472f6c2659c8313b87da962322dbcf Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Sun, 14 Jun 2015 15:41:50 +0100 Subject: mfd: arizona: Fix initialisation of the PM runtime The PM runtime core by default assumes a chip is suspended when runtime PM is enabled. Currently the arizona driver enables runtime PM when the chip is fully active and then disables the DCVDD regulator at the end of arizona_dev_init. 
This however has several problems: firstly, if we reach the end of arizona_dev_init, we did not properly follow all the procedures for shutting down the chip, and most notably we never marked the chip as cache only so any writes occurring between then and the next PM runtime resume will be lost. Secondly, if we are already resumed when we reach the end of dev_init, then at best we get unbalanced regulator enable/disables, at worst we lose DCVDD whilst we need it. Additionally, since the commit 4f0216409f7c ("mfd: arizona: Add better support for system suspend"), the PM runtime operations may disable/enable the IRQ, so the IRQs must now be enabled before we call any PM operations. This patch adds a call to pm_runtime_set_active to inform the PM core that the device is starting up active and moves the PM enabling to around the IRQ initialisation to avoid any PM callbacks happening until the IRQs are initialised. Cc: stable@vger.kernel.org # v3.5+ Signed-off-by: Charles Keepax Signed-off-by: Lee Jones diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index e60bcd9..a72ddb29 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.gpio_defaults[i]); } - pm_runtime_set_autosuspend_delay(arizona->dev, 100); - pm_runtime_use_autosuspend(arizona->dev); - pm_runtime_enable(arizona->dev); - /* Chip default */ if (!arizona->pdata.clk32k_src) arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; @@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.spk_fmt[i]); } + pm_runtime_set_active(arizona->dev); + pm_runtime_enable(arizona->dev); + /* Set up for interrupts */ ret = arizona_irq_init(arizona); if (ret != 0) goto err_reset; + pm_runtime_set_autosuspend_delay(arizona->dev, 100); + pm_runtime_use_autosuspend(arizona->dev); + arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", arizona_clkgen_err, arizona); arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", @@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona) goto err_irq; } -#ifdef CONFIG_PM - regulator_disable(arizona->dcvdd); -#endif - return 0; err_irq: -- cgit v0.10.2 From d12bbcd3ea4402704d13f687601dc5af1361a548 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 25 Jun 2015 02:20:42 +0200 Subject: platform/chrome: Don't make CHROME_PLATFORMS depends on X86 || ARM The Chrome platform support depends on X86 || ARM because there are only Chromebooks using those architectures. But only some drivers depend on a given architecture, and the ones that do already have a dependency on their specific Kconfig symbol entries. An option is to also add COMPILE_TEST to the CHROME_PLATFORMS dependencies, but it is more future proof to remove the dependency and let the drivers be built on all architectures if possible to have more build coverage. Acked-by: Olof Johansson Signed-off-by: Javier Martinez Canillas Signed-off-by: Lee Jones diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index cb13299..3271cd1 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -4,7 +4,6 @@ menuconfig CHROME_PLATFORMS bool "Platform support for Chrome hardware" - depends on X86 || ARM ---help--- Say Y here to get to see options for platform support for various Chromebooks and Chromeboxes.
This option alone does -- cgit v0.10.2 From fb9caeedafe61599371d057696bff3baef01f455 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 25 Jun 2015 02:20:44 +0200 Subject: mfd: Remove MFD_CROS_EC_SPI depends on OF The ChromeOS EC SPI transport driver has a dependency on OF because it uses some OF helpers from the header. But there isn't a need for an explicit dependency since the header has stub functions if CONFIG_OF is not defined. Also, MFD_CROS_EC_SPI already depends on MFD_CROS_EC which in turn has a dependency on OF, so in practice it can't be selected without CONFIG_OF. Signed-off-by: Javier Martinez Canillas Signed-off-by: Lee Jones diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 6538159..3f68dd2 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C config MFD_CROS_EC_SPI tristate "ChromeOS Embedded Controller (SPI)" - depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF + depends on MFD_CROS_EC && CROS_EC_PROTO && SPI ---help--- If you say Y here, you get support for talking to the ChromeOS EC -- cgit v0.10.2 From d50babbe300eedf33ea5b00a12c5df3a05bd96c7 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 22 Jul 2015 14:40:08 +0800 Subject: xen-blkfront: introduce blkfront_gather_backend_features() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a bug when migrating from a !feature-persistent host to a feature-persistent host, because domU still thinks the new host/backend doesn't support persistent grants. Dmesg looks like: backed has not unmapped grant: 839 backed has not unmapped grant: 773 backed has not unmapped grant: 773 backed has not unmapped grant: 773 backed has not unmapped grant: 839 The fix is to recheck feature-persistent of the new backend in blkif_recover().
See: https://lkml.org/lkml/2015/5/25/469 As Roger suggested, we can split the part of blkfront_connect that checks for optional features, like persistent grants, indirect descriptors and flush/barrier features to a separate function and call it from both blkfront_connect and blkif_recover Acked-by: Roger Pau Monné Signed-off-by: Bob Liu Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fc770b7..f45f4e6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock); ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) static int blkfront_setup_indirect(struct blkfront_info *info); +static int blkfront_gather_backend_features(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_info *info) { @@ -1525,7 +1526,7 @@ static int blkif_recover(struct blkfront_info *info) info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; - rc = blkfront_setup_indirect(info); + rc = blkfront_gather_backend_features(info); if (rc) { kfree(copy); return rc; @@ -1726,20 +1727,13 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_info *info) { - unsigned int indirect_segments, segs; + unsigned int segs; int err, i; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-max-indirect-segments", "%u", &indirect_segments, - NULL); - if (err) { - info->max_indirect_segments = 0; + if (info->max_indirect_segments == 0) segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; - } else { - info->max_indirect_segments = min(indirect_segments, - xen_blkif_max_segments); + else segs = info->max_indirect_segments; - } err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); if (err) @@ -1803,6 +1797,68 @@ out_of_memory: } /* + * Gather all backend feature-* + */ +static int blkfront_gather_backend_features(struct blkfront_info *info) +{ + int err; + int barrier, flush, discard, persistent; + unsigned int indirect_segments; + + info->feature_flush = 0; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-barrier", "%d", &barrier, + NULL); + + /* + * If there's no "feature-barrier" defined, then it means + * we're dealing with a very old backend which writes + * synchronously; nothing to do. + * + * If there are barriers, then we use flush. + */ + if (!err && barrier) + info->feature_flush = REQ_FLUSH | REQ_FUA; + /* + * And if there is "feature-flush-cache" use that above + * barriers. 
+ */ + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush, + NULL); + + if (!err && flush) + info->feature_flush = REQ_FLUSH; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-discard", "%d", &discard, + NULL); + + if (!err && discard) + blkfront_setup_discard(info); + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-persistent", "%u", &persistent, + NULL); + if (err) + info->feature_persistent = 0; + else + info->feature_persistent = persistent; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-max-indirect-segments", "%u", &indirect_segments, + NULL); + if (err) + info->max_indirect_segments = 0; + else + info->max_indirect_segments = min(indirect_segments, + xen_blkif_max_segments); + + return blkfront_setup_indirect(info); +} + +/* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ @@ -1813,7 +1869,6 @@ static void blkfront_connect(struct blkfront_info *info) unsigned int physical_sector_size; unsigned int binfo; int err; - int barrier, flush, discard, persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -1870,48 +1925,7 @@ static void blkfront_connect(struct blkfront_info *info) if (err != 1) physical_sector_size = sector_size; - info->feature_flush = 0; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%d", &barrier, - NULL); - - /* - * If there's no "feature-barrier" defined, then it means - * we're dealing with a very old backend which writes - * synchronously; nothing to do. - * - * If there are barriers, then we use flush. - */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; - /* - * And if there is "feature-flush-cache" use that above - * barriers. - */ - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-flush-cache", "%d", &flush, - NULL); - - if (!err && flush) - info->feature_flush = REQ_FLUSH; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-discard", "%d", &discard, - NULL); - - if (!err && discard) - blkfront_setup_discard(info); - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-persistent", "%u", &persistent, - NULL); - if (err) - info->feature_persistent = 0; - else - info->feature_persistent = persistent; - - err = blkfront_setup_indirect(info); + err = blkfront_gather_backend_features(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); -- cgit v0.10.2 From 7b0767502b5db11cb1f0daef2d01f6d71b1192dc Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 22 Jul 2015 14:40:09 +0800 Subject: xen-blkfront: don't add indirect pages to list when !feature_persistent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We should consider info->feature_persistent when adding indirect page to list info->indirect_pages, else the BUG_ON() in blkif_free() would be triggered. When we are using persistent grants the indirect_pages list should always be empty because blkfront has pre-allocated enough persistent pages to fill all requests on the ring. 
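For illustration only, a minimal sketch of the invariant described above, using the field names from this commit message (the real change is the small diff below): when persistent grants are in use the indirect pages are pre-allocated, so the free list must stay empty and teardown can assert that; otherwise a completed indirect request hands its page back for reuse.

    /* Sketch, not the actual patch: only recycle the indirect page when
     * persistent grants are not in use.
     */
    if (!info->feature_persistent) {
        indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
        list_add(&indirect_page->lru, &info->indirect_pages);
    }

    /* Teardown (blkif_free()) can then make the invariant explicit: */
    BUG_ON(info->feature_persistent &&
           !list_empty(&info->indirect_pages));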
CC: stable@vger.kernel.org Acked-by: Roger Pau Monné Signed-off-by: Bob Liu Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index f45f4e6..44b33d3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1135,8 +1135,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, * Add the used indirect page back to the list of * available pages for indirect grefs. */ - indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); - list_add(&indirect_page->lru, &info->indirect_pages); + if (!info->feature_persistent) { + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); + list_add(&indirect_page->lru, &info->indirect_pages); + } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } -- cgit v0.10.2 From 53bc7dc004fecf39e0ba70f2f8d120a1444315d3 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 22 Jul 2015 14:40:10 +0800 Subject: xen-blkback: replace work_pending with work_busy in purge_persistent_gnt() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The BUG_ON() in purge_persistent_gnt() will be triggered when the previous purge work hasn't finished. There is a work_pending() check before this BUG_ON(), but it doesn't account for work that is still currently running. CC: stable@vger.kernel.org Acked-by: Roger Pau Monné Signed-off-by: Bob Liu Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 9121a2c..73c0404 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -382,8 +382,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) return; } - if (work_pending(&blkif->persistent_purge_work)) { - pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); + if (work_busy(&blkif->persistent_purge_work)) { + pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); return; } -- cgit v0.10.2 From bffc4496886683fac86d31e5d0cf9a22f8044e3d Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 19 Jul 2015 12:09:16 +0800 Subject: ASoC: cs4265: Fix setting dai format for Left/Right Justified MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The settings in the current code do not match the datasheet; fix them.
DAC Control - Address 03h DAC Digital Interface Format (Bits 5:4) DAC_DIF1 DAC_DIF0 Description 0 0 Left Justified, up to 24-bit data (default) 0 1 I²S, up to 24-bit data 1 0 Right-Justified, 16-bit Data 1 1 Right-Justified, 24-bit Data Transmitter Control 2 - Address 12h Transmitter Digital Interface Format (Bits 7:6) Tx_DIF1 Tx_DIF0 Description Format Figure 0 0 Left Justified, up to 24-bit data (default) 0 1 I²S, up to 24-bit data 1 0 Right-Justified, 16-bit Data 1 1 Right-Justified, 24-bit Data Signed-off-by: Axel Lin Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index d7ec4756..8e36198 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream, case SND_SOC_DAIFMT_RIGHT_J: if (params_width(params) == 16) { snd_soc_update_bits(codec, CS4265_DAC_CTL, - CS4265_DAC_CTL_DIF, (1 << 5)); + CS4265_DAC_CTL_DIF, (2 << 4)); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, - CS4265_SPDIF_CTL2_DIF, (1 << 7)); + CS4265_SPDIF_CTL2_DIF, (2 << 6)); } else { snd_soc_update_bits(codec, CS4265_DAC_CTL, - CS4265_DAC_CTL_DIF, (3 << 5)); + CS4265_DAC_CTL_DIF, (3 << 4)); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, - CS4265_SPDIF_CTL2_DIF, (1 << 7)); + CS4265_SPDIF_CTL2_DIF, (3 << 6)); } break; case SND_SOC_DAIFMT_LEFT_J: @@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream, snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_DIF, 0); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, - CS4265_SPDIF_CTL2_DIF, (1 << 6)); + CS4265_SPDIF_CTL2_DIF, 0); break; default: -- cgit v0.10.2 From e053f96b1a00022b4e2c7ceb7ac0229646626507 Mon Sep 17 00:00:00 2001 From: Denis Carikli Date: Thu, 23 Jul 2015 10:31:12 +0200 Subject: ARM: dts: i.MX35: Fix can support. Since commit 3d42a379b6fa5b46058e3302b1802b29f64865bb ("can: flexcan: add 2nd clock to support imx53 and newer") the can driver requires a dt nodes to have a second clock. Add them to imx35 to fix probing the flex can driver on the respective platforms. Signed-off-by: Denis Carikli Cc: Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index b6478e9..e6540b5 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -286,8 +286,8 @@ can1: can@53fe4000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe4000 0x1000>; - clocks = <&clks 33>; - clock-names = "ipg"; + clocks = <&clks 33>, <&clks 33>; + clock-names = "ipg", "per"; interrupts = <43>; status = "disabled"; }; @@ -295,8 +295,8 @@ can2: can@53fe8000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe8000 0x1000>; - clocks = <&clks 34>; - clock-names = "ipg"; + clocks = <&clks 34>, <&clks 34>; + clock-names = "ipg", "per"; interrupts = <44>; status = "disabled"; }; -- cgit v0.10.2 From a153790a788392e75d37262a9c5960d1d91ddb7e Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 25 Jul 2015 11:22:03 +0200 Subject: ARM: nomadik: disable UART0 on Nomadik boards The UART0 is not used on these boards, yet active and blocking other use. Fix this by disabling UART0 and setting port aliases to maintain port enumeration to userspace. 
Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index 3d0b875..3d25dba 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -17,6 +17,7 @@ }; aliases { + serial1 = &uart1; stmpe-i2c0 = &stmpe0; stmpe-i2c1 = &stmpe1; }; diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index 85d3b95..3c140d0 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -15,6 +15,10 @@ bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; }; + aliases { + serial1 = &uart1; + }; + src@101e0000 { /* These chrystal drivers are not used on this board */ disable-sxtalo; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 9a5f2ba..ef794a3 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -757,6 +757,7 @@ clock-names = "uartclk", "apb_pclk"; pinctrl-names = "default"; pinctrl-0 = <&uart0_default_mux>; + status = "disabled"; }; uart1: uart@101fb000 { -- cgit v0.10.2 From 02c3b4c75994888e58a6e6e8176640cde9822597 Mon Sep 17 00:00:00 2001 From: Al Cooper Date: Fri, 24 Jul 2015 16:09:36 -0400 Subject: usb: gadget: bdc: fix a driver crash on disconnect ep_dequeue() in bdc_ep.c was capturing the hw dequeue pointer incorrectly by reading the wrong register for the upper 32 bits. Signed-off-by: Al Cooper Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index b04980c..1efa612 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) /* The current hw dequeue pointer */ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0)); deq_ptr_64 = tmp_32; - tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1)); + tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0)); deq_ptr_64 |= ((u64)tmp_32 << 32); /* we have the dma addr of next bd that will be fetched by hardware */ -- cgit v0.10.2 From c41b7767673cb76adeb2b5fde220209f717ea13c Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Mon, 27 Jul 2015 14:51:47 +0800 Subject: usb: gadget: f_uac2: fix calculation of uac2->p_interval The p_interval should be less if the 'bInterval' at the descriptor is larger, eg, if 'bInterval' is 5 for HS, the p_interval should be 8000 / 16 = 500. 
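For reference, a minimal sketch of the corrected arithmetic, using the names from the diff below (not the full driver code): at high speed a frame is split into 8 microframes, so there are 8000 microframes per second, and an isochronous endpoint with an exponent-encoded bInterval is serviced every 2^(bInterval - 1) microframes.

    /* Sketch: packets per second for a HS isochronous endpoint. */
    unsigned int factor = 8000;    /* HS microframes per second */
    unsigned int p_interval = factor / (1 << (ep_desc->bInterval - 1));
    /* bInterval == 5  ->  8000 / (1 << 4) = 8000 / 16 = 500 */

The old code multiplied instead of dividing, so a larger bInterval produced a larger p_interval rather than a smaller one.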
It fixes the patch 9bb87f168931 ("usb: gadget: f_uac2: send reasonably sized packets") Cc: # v3.18+ Fixes: 9bb87f168931 ("usb: gadget: f_uac2: send reasonably sized packets") Acked-by: Daniel Mack Signed-off-by: Peter Chen Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 6d3eb8b..5318615 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt) factor = 1000; } else { ep_desc = &hs_epin_desc; - factor = 125; + factor = 8000; } /* pre-compute some values for iso_complete() */ uac2->p_framesize = opts->p_ssize * num_channels(opts->p_chmask); rate = opts->p_srate * uac2->p_framesize; - uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; + uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1)); uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, prm->max_psize); -- cgit v0.10.2 From 774cf72f8ad971031428f8057d2947c8780a7b8c Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Fri, 24 Jul 2015 09:48:40 +0200 Subject: usb: gadget: f_hid: actually limit the number of instances There is a predefined maximum number of hid instances, currently 4. A chrdev region is allocated accordingly, but with configfs the user can create as many hid function directories as they like. To make the number of hid instances consistent with the number of allocated minors, the limit is enforced at directory creation time. Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index f7f35a3..6df9715 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -699,6 +699,10 @@ static inline int hidg_get_minor(void) int ret; ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); + if (ret >= HIDG_MINORS) { + ida_simple_remove(&hidg_ida, ret); + ret = -ENODEV; + } return ret; } -- cgit v0.10.2 From 4248bd7d3e2c7c87ff695d812018b8c22b5a5ab1 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Fri, 24 Jul 2015 09:48:41 +0200 Subject: usb: gadget: f_printer: actually limit the number of instances There is a predefined maximum number of printer instances, currently 4. A chrdev region is allocated accordingly, but with configfs the user can create as many printer function directories as they like. To make the number of printer instances consistent with the number of allocated minors, the limit is enforced at directory creation time. 
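As with the f_hid change above, the check relies on ida_simple_get() returning the smallest free id: once the returned id reaches the number of reserved minors, every minor is already taken, and the id has to be released again so it is not leaked. A rough sketch of the pattern, reusing the names from the diff below:

    /* Sketch; PRINTER_MINORS is the number of reserved chrdev minors. */
    int minor = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
    if (minor >= PRINTER_MINORS) {
        ida_simple_remove(&printer_ida, minor);    /* don't leak the id */
        minor = -ENODEV;
    }

An alternative would be to pass the limit as the 'end' argument of ida_simple_get() and let the allocator refuse ids beyond it; the patch keeps the explicit check instead.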
Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 44173df..357f63f 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = { static inline int gprinter_get_minor(void) { - return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); + int ret; + + ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); + if (ret >= PRINTER_MINORS) { + ida_simple_remove(&printer_ida, ret); + ret = -ENODEV; + } + + return ret; } static inline void gprinter_put_minor(int minor) -- cgit v0.10.2 From 17fb874dee093139923af8ed36061faa92cc8e79 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 24 Jul 2015 13:13:30 +0200 Subject: hwrng: core - correct error check of kthread_run call The kthread_run() function can return two different error values but the hwrng core only checks for -ENOMEM. If the other error value -EINTR is returned it is assigned to hwrng_fill and later used on a kthread_stop() call which naturally crashes. Cc: stable@vger.kernel.org Signed-off-by: Martin Schwidefsky Signed-off-by: Herbert Xu diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index da8faf7..5643b65 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused) static void start_khwrngd(void) { hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); - if (hwrng_fill == ERR_PTR(-ENOMEM)) { + if (IS_ERR(hwrng_fill)) { pr_err("hwrng_fill thread creation failed"); hwrng_fill = NULL; } -- cgit v0.10.2 From 74472233233f577eaa0ca6d6e17d9017b6e53150 Mon Sep 17 00:00:00 2001 From: Dirk Behme Date: Mon, 27 Jul 2015 08:56:05 +0200 Subject: USB: sierra: add 1199:68AB device ID Add support for the Sierra Wireless AR8550 device with USB descriptor 0x1199, 0x68AB. It is common with MC879x modules 1199:683c/683d which also are composite devices with 7 interfaces (0..6) and also MDM62xx based as the AR8550. The major difference are only the interface attributes 02/02/01 on interfaces 3 and 4 on the AR8550. They are vendor specific ff/ff/ff on MC879x modules. lsusb reports: Bus 001 Device 004: ID 1199:68ab Sierra Wireless, Inc. Device Descriptor: bLength 18 bDescriptorType 1 bcdUSB 2.00 bDeviceClass 0 (Defined at Interface level) bDeviceSubClass 0 bDeviceProtocol 0 bMaxPacketSize0 64 idVendor 0x1199 Sierra Wireless, Inc. 
idProduct 0x68ab bcdDevice 0.06 iManufacturer 3 Sierra Wireless, Incorporated iProduct 2 AR8550 iSerial 0 bNumConfigurations 1 Configuration Descriptor: bLength 9 bDescriptorType 2 wTotalLength 198 bNumInterfaces 7 bConfigurationValue 1 iConfiguration 1 Sierra Configuration bmAttributes 0xe0 Self Powered Remote Wakeup MaxPower 0mA Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 0 bAlternateSetting 0 bNumEndpoints 2 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x81 EP 1 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x01 EP 1 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 1 bAlternateSetting 0 bNumEndpoints 2 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x82 EP 2 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x02 EP 2 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 2 bAlternateSetting 0 bNumEndpoints 2 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x83 EP 3 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x03 EP 3 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 3 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 2 Communications bInterfaceSubClass 2 Abstract (modem) bInterfaceProtocol 1 AT-commands (v.25ter) iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x84 EP 4 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0040 1x 64 bytes bInterval 5 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x85 EP 5 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x04 EP 4 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 4 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 2 Communications bInterfaceSubClass 2 Abstract (modem) bInterfaceProtocol 1 AT-commands (v.25ter) iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x86 EP 6 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0040 1x 64 bytes bInterval 5 Endpoint Descriptor: bLength 7 bDescriptorType 5 
bEndpointAddress 0x87 EP 7 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x05 EP 5 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 5 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x88 EP 8 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0040 1x 64 bytes bInterval 5 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x89 EP 9 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x06 EP 6 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 6 bAlternateSetting 0 bNumEndpoints 3 bInterfaceClass 255 Vendor Specific Class bInterfaceSubClass 255 Vendor Specific Subclass bInterfaceProtocol 255 Vendor Specific Protocol iInterface 0 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x8a EP 10 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0040 1x 64 bytes bInterval 5 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x8b EP 11 IN bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x07 EP 7 OUT bmAttributes 2 Transfer Type Bulk Synch Type None Usage Type Data wMaxPacketSize 0x0200 1x 512 bytes bInterval 32 Device Qualifier (for other device speed): bLength 10 bDescriptorType 6 bcdUSB 2.00 bDeviceClass 0 (Defined at Interface level) bDeviceSubClass 0 bDeviceProtocol 0 bMaxPacketSize0 64 bNumConfigurations 1 Device Status: 0x0001 Self Powered Signed-off-by: Dirk Behme Cc: Lars Melin Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 46179a0..07d1ecd 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */ /* AT&T Direct IP LTE modems */ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist -- cgit v0.10.2 From c9fdec9f3970eeaa1b176422f46167f5f5158804 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 20 Jul 2015 12:14:39 +0300 Subject: iwlwifi: pcie: fix prepare card flow When the card is not owned by the PCIe bus, we need to acquire ownership first. This flow is implemented in iwl_pcie_prepare_card_hw. Because of a hardware bug, we need to disable link power management before we can request ownership otherwise the other user of the device won't get notified that we are requesting the device which will prevent us from acquire ownership. 
Same holds for the down flow where we need to make sure that any other potential user is notified that the driver is going down. CC: [4.1] Signed-off-by: Emmanuel Grumbach diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 6203c4a..9e144e7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } mdelay(5); } @@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) if (ret >= 0) return 0; + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + msleep(1); + for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; + if (ret >= 0) { + ret = 0; + goto out; + } usleep_range(200, 1000); t += 200; @@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_ERR(trans, "Couldn't prepare the card\n"); +out: + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + return ret; } -- cgit v0.10.2 From dc9f69b907f3853952d3ffb7918d05a662146712 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Tue, 7 Jul 2015 16:53:42 +0300 Subject: iwlwifi: mvm: Fix regular scan priority The code checks the total number of iterations to differentiate between regular scan and scheduled scan. However, regular scan has a total of one iteration, not zero. As a result, regular scan will have lower priority than it should have, and in case scheduled scan is already running when regular scan is requested, regular scan will be delayed until scheduled scan is aborted. Fix that by checking for total iterations number of one as an identifier for regular scan. Fixes: 133c8259f885 ("iwlwifi: mvm: rename generic_scan_cmd functions to dwell") Signed-off-by: Avraham Stern Signed-off-by: Emmanuel Grumbach diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5000bfcd..5514ad6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->scan_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - if (iwl_mvm_scan_total_iterations(params) == 0) + if (iwl_mvm_scan_total_iterations(params) == 1) cmd->ooc_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else -- cgit v0.10.2 From fe0d34d242fa1e0dec059e774d146a705420bc9a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 29 Jul 2015 05:52:14 +0930 Subject: module: weaken locking assertion for oops path. 
We don't actually hold the module_mutex when calling find_module_all from module_kallsyms_lookup_name: that's because it's used by the oops code and we don't want to deadlock. However, access to the list read-only is safe if preempt is disabled, so we can weaken the assertion. Keep a strong version for external callers though. Fixes: 0be964be0d45 ("module: Sanitize RCU usage and locking") Reported-by: He Kuang Cc: stable@kernel.org Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rusty Russell diff --git a/kernel/module.c b/kernel/module.c index 4d2b82e..b86b7bf 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name, } EXPORT_SYMBOL_GPL(find_symbol); -/* Search for module by name: must hold module_mutex. */ +/* + * Search for module by name: must hold module_mutex (or preempt disabled + * for read-only access). + */ static struct module *find_module_all(const char *name, size_t len, bool even_unformed) { struct module *mod; - module_assert_mutex(); + module_assert_mutex_or_preempt(); list_for_each_entry(mod, &modules, list) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) @@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len, struct module *find_module(const char *name) { + module_assert_mutex(); return find_module_all(name, strlen(name), false); } EXPORT_SYMBOL_GPL(find_module); -- cgit v0.10.2 From e350f8045f642dfc4c28a81c57d2103e1f7ceead Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 11 Jun 2015 20:19:25 +0900 Subject: extcon: palmas: Fix NULL pointer error This patch fixes NULL pointer error by removing the unneeded kfree() call of edev->name because extcon-palmas no longer allocate the memory for edev->name. Fixes: d71aadda19f8 ("extcon: Remove the optional name of extcon device") Signed-off-by: Chanwoo Choi Reviewed-by: Krzysztof Kozlowski diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 080d5cc..eebdf2a 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev) status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { dev_err(&pdev->dev, "failed to register extcon device\n"); - kfree(palmas_usb->edev->name); return status; } @@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->id_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->vbus_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev) return 0; } -static int palmas_usb_remove(struct platform_device *pdev) -{ - struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); - - kfree(palmas_usb->edev->name); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int palmas_usb_suspend(struct device *dev) { @@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = { static struct platform_driver palmas_usb_driver = { .probe = palmas_usb_probe, - .remove = palmas_usb_remove, .driver = { .name = "palmas-usb", .of_match_table = of_palmas_match_tbl, -- cgit v0.10.2 From 4a8e70f5d0d80675fc17b9ba1e62db8ca6b91775 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 29 Jul 2015 13:16:06 
+0300 Subject: HID: uclogic: fix limit in uclogic_tablet_enable() The limit should be ARRAY_SIZE(params) (5 elements) here instead of sizeof(params) (20 bytes). Fixes: 08177f40bd00 ('HID: uclogic: merge hid-huion driver in hid-uclogic') Signed-off-by: Dan Carpenter Reviewed-by: Nikolai Kondrashov Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 9416731..b905d50 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev) for (p = drvdata->rdesc; p <= drvdata->rdesc + drvdata->rsize - 4;) { if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && - p[3] < sizeof(params)) { + p[3] < ARRAY_SIZE(params)) { v = params[p[3]]; put_unaligned(cpu_to_le32(v), (s32 *)p); p += 4; -- cgit v0.10.2 From 4b563ea317c2262987f0675abf15066614f536a1 Mon Sep 17 00:00:00 2001 From: Jie Yang Date: Tue, 28 Jul 2015 16:01:05 +0800 Subject: ASoC: Intel: haswell: fix initialize 'NULL device *' issue Add initialization for sst_hsw.dev at init stage, which fixes the 'NULL device *' warning issues. Signed-off-by: Jie Yang Signed-off-by: Mark Brown diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index f95f271..f6efa9d 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c @@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) if (hsw == NULL) return -ENOMEM; + hsw->dev = dev; + ipc = &hsw->ipc; ipc->dev = dev; ipc->ops.tx_msg = hsw_tx_msg; -- cgit v0.10.2 From 45f503df1ba445359b94e1758c5e4f2c3460c8e4 Mon Sep 17 00:00:00 2001 From: Jie Yang Date: Tue, 28 Jul 2015 16:01:06 +0800 Subject: ASoC: Intel: sst_byt: fix initialize 'NULL device *' issue Add initialization for sst_byt.dev at init stage, which fixes the 'NULL device *' warning issues. Signed-off-by: Jie Yang Signed-off-by: Mark Brown diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c index 4c01bb4..5bbaa66 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c +++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c @@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata) if (byt == NULL) return -ENOMEM; + byt->dev = dev; + ipc = &byt->ipc; ipc->dev = dev; ipc->ops.tx_msg = byt_tx_msg; -- cgit v0.10.2 From 8ef9724bf9718af81cfc5132253372f79c71b7e2 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 26 Jul 2015 21:34:50 -0700 Subject: regmap: regcache-rbtree: Clean new present bits on present bitmap resize When inserting a new register into a block, the present bitmap size is increased using krealloc. krealloc does not clear the additionally allocated memory, leaving it filled with random values. The result is that some registers are considered cached even though this is not the case. Fix the problem by clearing the additionally allocated memory. Also, if the bitmap size does not increase, do not reallocate the bitmap at all to reduce overhead.
Fixes: 3f4ff561bc88 ("regmap: rbtree: Make cache_present bitmap per node") Signed-off-by: Guenter Roeck Signed-off-by: Mark Brown Cc: stable@vger.kernel.org diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 81751a4..56486d9 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (!blk) return -ENOMEM; - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); - if (!present) { - kfree(blk); - return -ENOMEM; + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), + GFP_KERNEL); + if (!present) { + kfree(blk); + return -ENOMEM; + } + + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) + * sizeof(*present)); + } else { + present = rbnode->cache_present; } /* insert the register value in the correct place in the rbnode block */ -- cgit v0.10.2 From b8d65e9662b1ffb3b52a65fd11b0b968022dc6a1 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 30 Jul 2015 16:53:54 +1000 Subject: powerpc/eeh-powernv: Fix unbalanced IRQ warning pnv_eeh_next_error() re-enables the eeh opal event interrupt but it gets called from a loop if there are more outstanding events to process, resulting in a warning due to enabling an already enabled interrupt. Instead the interrupt should only be re-enabled once the last outstanding event has been processed. Tested-by: Daniel Axtens Reported-by: Daniel Axtens Signed-off-by: Alistair Popple Acked-by: Gavin Shan Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 5cf5e6e..7cf0df8 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) } /* Unmask the event */ - if (eeh_enabled()) + if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) enable_irq(eeh_event_irq); return ret; -- cgit v0.10.2 From f0ad462189cc898aa0ef8ced849533ee03392bcc Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 23 Jul 2015 13:06:10 +0200 Subject: netfilter: nf_conntrack: silence warning on falling back to vmalloc() Since 88eab472ec21 ("netfilter: conntrack: adjust nf_conntrack_buckets default value"), the hashtable can easily hit this warning. We got reports from users that are getting this message in a quite spamming fashion, so better silence this. 
Signed-off-by: Pablo Neira Ayuso Acked-by: Florian Westphal diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 651039a..f168099 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); - if (!hash) { - printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); + if (!hash) hash = vzalloc(sz); - } if (hash && nulls) for (i = 0; i < nr_slots; i++) -- cgit v0.10.2 From 586b7ccdb7143b6a9b975d2c6ad52b6ca5c162b9 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 28 Jul 2015 15:03:05 +0200 Subject: KVM: s390: Fix hang VCPU hang/loop regression commit 785dbef407d8 ("KVM: s390: optimize round trip time in request handling") introduced a regression. This regression was seen with CPU hotplug in the guest and switching between 1 or 2 CPUs. This will set/reset the IBS control via synced request. Whenever we make a synced request, we first set the vcpu->requests bit and then block the vcpu. The handler, on the other hand, unblocks itself, processes vcpu->requests (by clearing them) and unblocks itself once again. Now, if the requester sleeps between setting of vcpu->requests and blocking, the handler will clear the vcpu->requests bit and try to unblock itself (although no bit is set). When the requester wakes up, it blocks the VCPU and we have a blocked VCPU without requests. The solution is to always unset the block bit. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Fixes: 785dbef407d8 ("KVM: s390: optimize round trip time in request handling") diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2078f92..f32f843 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu) static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) { - if (!vcpu->requests) - return 0; retry: kvm_s390_vcpu_request_handled(vcpu); + if (!vcpu->requests) + return 0; /* * We use MMU_RELOAD just to re-arm the ipte notifier for the * guest prefix page. gmap_ipte_notify will wait on the ptl lock. -- cgit v0.10.2 From 1a727c63612fc582370cf3dc01239d3d239743b5 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 28 Jul 2015 01:42:28 +0300 Subject: netfilter: nf_conntrack: checking for IS_ERR() instead of NULL We recently changed this from nf_conntrack_alloc() to nf_ct_tmpl_alloc() so the error handling needs to be changed to check for NULL instead of IS_ERR().
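In general terms (a generic sketch with placeholder allocator names, not the synproxy/xt_CT code): an ERR_PTR()-style allocator encodes the error code in the returned pointer, so callers test it with IS_ERR() and extract it with PTR_ERR(); a NULL-returning allocator carries no error code, so the caller has to supply one itself.

    /* Convention 1: allocator returns ERR_PTR(-E...) on failure. */
    obj = alloc_err_ptr_style();        /* placeholder name */
    if (IS_ERR(obj))
        return PTR_ERR(obj);

    /* Convention 2: allocator returns NULL on failure,
     * as nf_ct_tmpl_alloc() does now.
     */
    obj = alloc_null_style();           /* placeholder name */
    if (!obj)
        return -ENOMEM;

Mixing the two is what broke here: IS_ERR(NULL) is false, so a failed allocation sailed past the old checks.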
Fixes: 0838aa7fcfcd ('netfilter: fix netns dependencies with conntrack templates') Signed-off-by: Dan Carpenter Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 71f1e9f..d7f1685 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net) int err = -ENOMEM; ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL); - if (IS_ERR(ct)) { - err = PTR_ERR(ct); + if (!ct) goto err1; - } if (!nfct_seqadj_ext_add(ct)) goto err2; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index c663003..43ddeee 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, goto err1; ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL); - ret = PTR_ERR(ct); - if (IS_ERR(ct)) + if (!ct) { + ret = -ENOMEM; goto err2; + } ret = 0; if ((info->ct_events || info->exp_events) && -- cgit v0.10.2 From 7cc03e48965453b5df1cce5062c826189b04b960 Mon Sep 17 00:00:00 2001 From: Dmitry Skorodumov Date: Tue, 28 Jul 2015 18:38:32 +0400 Subject: x86/efi: Use all 64 bit of efi_memmap in setup_e820() The efi_info structure stores the low 32 bits of the memory map in efi_memmap and the high 32 bits in efi_memmap_hi. When constructing the pointer in setup_e820(), all 64 bits of the pointer need to be taken into account. This is because on a 64-bit machine the function efi_get_memory_map() may return a full 64-bit pointer, and before this patch that pointer was truncated. The issue is triggered on a Parallels virtual machine and fixed with this patch. Signed-off-by: Dmitry Skorodumov Cc: Denis V. Lunev Cc: Signed-off-by: Matt Fleming diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 2c82bd1..7d69afd 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params, unsigned int e820_type = 0; unsigned long m = efi->efi_memmap; +#ifdef CONFIG_X86_64 + m |= (u64)efi->efi_memmap_hi << 32; +#endif + d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); switch (d->type) { case EFI_RESERVED_TYPE: -- cgit v0.10.2 From 9115c7589b11349a1c3099758b4bded579ff69e0 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Wed, 15 Jul 2015 19:36:03 -0700 Subject: efi: Check for NULL efi kernel parameters Even though it is documented how to specify efi parameters, it is possible to cause a kernel panic due to a dereference of a NULL pointer when parsing such parameters if "efi" alone is given: PANIC: early exception 0e rip 10:ffffffff812fb361 error 0 cr2 0 [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.2.0-rc1+ #450 [ 0.000000] ffffffff81fe20a9 ffffffff81e03d50 ffffffff8184bb0f 00000000000003f8 [ 0.000000] 0000000000000000 ffffffff81e03e08 ffffffff81f371a1 64656c62616e6520 [ 0.000000] 0000000000000069 000000000000005f 0000000000000000 0000000000000000 [ 0.000000] Call Trace: [ 0.000000] [] dump_stack+0x45/0x57 [ 0.000000] [] early_idt_handler_common+0x81/0xae [ 0.000000] [] ? parse_option_str+0x11/0x90 [ 0.000000] [] arch_parse_efi_cmdline+0x15/0x42 [ 0.000000] [] do_early_param+0x50/0x8a [ 0.000000] [] parse_args+0x1e3/0x400 [ 0.000000] [] parse_early_options+0x24/0x28 [ 0.000000] [] ? loglevel+0x31/0x31 [ 0.000000] [] parse_early_param+0x31/0x3d [ 0.000000] [] setup_arch+0x2de/0xc08 [ 0.000000] [] ?
vprintk_default+0x1a/0x20 [ 0.000000] [] start_kernel+0x90/0x423 [ 0.000000] [] x86_64_start_reservations+0x2a/0x2c [ 0.000000] [] x86_64_start_kernel+0xeb/0xef [ 0.000000] RIP 0xffffffff81ba2efc This panic is not reproducible with "efi=" as this will result in a non-NULL zero-length string. Thus, verify that the pointer to the parameter string is not NULL. This is consistent with other parameter-parsing functions which check for NULL pointers. Signed-off-by: Ricardo Neri Cc: Dave Young Cc: Signed-off-by: Matt Fleming diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cfba30f..e4308fe 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr) static int __init arch_parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "old_map")) set_bit(EFI_OLD_MEMMAP, &efi.flags); if (parse_option_str(str, "debug")) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9fa8084..d6144e3 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -58,6 +58,11 @@ bool efi_runtime_disabled(void) static int __init parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "noruntime")) disable_runtime = true; -- cgit v0.10.2 From 36b8e180e1e929e00b351c3b72aab3147fc14116 Mon Sep 17 00:00:00 2001 From: Brian King Date: Tue, 14 Jul 2015 11:41:29 -0500 Subject: ipr: Fix locking for unit attention handling Make sure we have the host lock held when calling scsi_report_bus_reset. Fixes a crash seen as the __devices list in the scsi host was changing as we were iterating through it. Cc: Reviewed-by: Wen Xiong Reviewed-by: Gabriel Krisman Bertazi Signed-off-by: Brian King Reviewed-by: Martin K. 
Petersen Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 8827448..486ffaf 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6263,21 +6263,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - unsigned long hrrq_flags; + unsigned long lock_flags; scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(scsi_cmd); - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + spin_lock(&ipr_cmd->hrrq->_lock); ipr_erp_start(ioa_cfg, ipr_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock(&ipr_cmd->hrrq->_lock); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } } -- cgit v0.10.2 From bb7c54339e6a10ecce5c4961adf5e75b3cf0af30 Mon Sep 17 00:00:00 2001 From: Brian King Date: Tue, 14 Jul 2015 11:41:31 -0500 Subject: ipr: Fix incorrect trace indexing When ipr's internal driver trace was changed to an atomic, a signed/unsigned bug slipped in which results in us indexing backwards in our memory buffer writing on memory that does not belong to us. This patch fixes this by removing the modulo and instead just mask off the low bits. Cc: Tested-by: Wen Xiong Reviewed-by: Wen Xiong Reviewed-by: Gabriel Krisman Bertazi Signed-off-by: Brian King Reviewed-by: Martin K. Petersen Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 486ffaf..7ae9cd7 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, { struct ipr_trace_entry *trace_entry; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned int trace_index; - trace_entry = &ioa_cfg->trace[atomic_add_return - (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; + trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; + trace_entry = &ioa_cfg->trace[trace_index]; trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 73790a1..6b97ee4 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg { #define IPR_NUM_TRACE_INDEX_BITS 8 #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1) #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) char trace_start[8]; #define IPR_TRACE_START_LABEL "trace" -- cgit v0.10.2 From 3f1c0581310d5d94bd72740231507e763a6252a4 Mon Sep 17 00:00:00 2001 From: Brian King Date: Tue, 14 Jul 2015 11:41:33 -0500 Subject: ipr: Fix invalid array indexing for HRRQ Fixes another signed / unsigned array indexing bug in the ipr driver. Currently, when hrrq_index wraps, it becomes a negative number. We do the modulo, but still have a negative number, so we end up indexing backwards in the array. 
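As an aside for readers of the log, the arithmetic behind this fix and the trace-index fix above can be shown in a few lines of standalone C. The constants and names below are made up for illustration and are not the ipr driver's own code; the point is that C's '%' operator truncates toward zero, so a wrapped signed counter taken modulo the array size stays negative, while masking with a power-of-two size minus one always lands back in range.

#include <stdio.h>

#define NUM_ENTRIES (1 << 8)            /* a power-of-two ring size */
#define INDEX_MASK  (NUM_ENTRIES - 1)

int main(void)
{
	int counter = -2;       /* pretend the counter has wrapped past INT_MAX */

	/* '%' keeps the sign of the dividend, so this indexes before the array. */
	int bad_index = counter % NUM_ENTRIES;          /* -2 */

	/* Masking keeps only the low bits, always within [0, NUM_ENTRIES). */
	unsigned int good_index = counter & INDEX_MASK; /* 254 on two's-complement machines */

	printf("modulo: %d  mask: %u\n", bad_index, good_index);
	return 0;
}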
Given where the hrrq array is located in memory, we probably won't actually reference memory we don't own, but nonetheless ipr is still looking at data within struct ipr_ioa_cfg and interpreting it as struct ipr_hrr_queue data, so bad things could certainly happen. Each ipr adapter has anywhere from 1 to 16 HRRQs. By default, we use 2 on new adapters. Let's take an example: Assume ioa_cfg->hrrq_index=0x7fffffffe and ioa_cfg->hrrq_num=4: The atomic_add_return will then return -1. We mod this with 3 and get -2, add one and get -1 for an array index. On adapters which support more than a single HRRQ, we dedicate HRRQ to adapter initialization and error interrupts so that we can optimize the other queues for fast path I/O. So all normal I/O uses HRRQ 1-15. So we want to spread the I/O requests across those HRRQs. With the default module parameter settings, this bug won't hit, only when someone sets the ipr.number_of_msix parameter to a value larger than 3 is when bad things start to happen. Cc: Tested-by: Wen Xiong Reviewed-by: Wen Xiong Reviewed-by: Gabriel Krisman Bertazi Signed-off-by: Brian King Reviewed-by: Martin K. Petersen Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7ae9cd7..a9aa389 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1052,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) { + unsigned int hrrq; + if (ioa_cfg->hrrq_num == 1) - return 0; - else - return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; + hrrq = 0; + else { + hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); + hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; + } + return hrrq; } /** -- cgit v0.10.2 From 0c958ecc69c277b25f38f72bc6d18ab145e8167c Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 16 Jul 2015 11:40:41 -0400 Subject: scsi: fix memory leak with scsi-mq Fix a memory leak with scsi-mq triggered by commands with large data transfer length. __sg_alloc_table() sets both table->nents and table->orig_nents to the same value. When the scatterlist is DMA-mapped, table->nents is overwritten with the (possibly smaller) size of the DMA-mapped scatterlist, while table->orig_nents retains the original size of the allocated scatterlist. scsi_free_sgtable() should therefore check orig_nents instead of nents, and all code that initializes sdb->table without calling __sg_alloc_table() should set both nents and orig_nents. Fixes: d285203cf647 ("scsi: add support for a blk-mq based I/O path.") Cc: # 3.17+ Signed-off-by: Tony Battersby Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. 
Milne Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 106884a..cfadcce 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, scmd->sdb.length); scmd->sdb.table.sgl = &ses->sense_sgl; scmd->sc_data_direction = DMA_FROM_DEVICE; - scmd->sdb.table.nents = 1; + scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1; scmd->cmnd[0] = REQUEST_SENSE; scmd->cmnd[4] = scmd->sdb.length; scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b1a2631..448ebda 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) { - if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) + if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) return; __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); } @@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) if (mq) { if (nents <= SCSI_MAX_SG_SEGMENTS) { - sdb->table.nents = nents; - sg_init_table(sdb->table.sgl, sdb->table.nents); + sdb->table.nents = sdb->table.orig_nents = nents; + sg_init_table(sdb->table.sgl, nents); return 0; } first_chunk = sdb->table.sgl; -- cgit v0.10.2 From aecdc63d87891c75e60906973c7b7c9cd58403d6 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 29 Jul 2015 23:06:41 +0300 Subject: iwlwifi: pcie: fix stuck queue detection for sleeping clients The stuck queue detection mechanism allows to detect queues that are stuck. For sleeping clients, a queue may rightfully be stuck: if a poor client implementation stays asleep for more than 10s, then we don't want to trigger recovery flows because of that client. In order to cope with this, I added a mechanism that monitors the state of the client: when a client goes to sleep, the timer of his queues is frozen. When he wakes up, the timer is reset to the right value so that if a client was awake for more than 10s and the queues are stuck, only then, the recovery flow will kick in. This is valid only on non-shared queues: A-MPDU queues. There was a bug in case we Tx to a sleeping client that has an empty A-MPDU queue: the timer was armed to now + 10s. This is bad, but pretty harmless. The problem is that when the client wakes up, the timer is modified to be now + remainder. But remainder is 0 since the queue was empty when that client went to sleep... Fix this by checking the state of the client before playing with the timer when we add a packet to an empty queue. Signed-off-by: Emmanuel Grumbach diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 2b86c21..607acb5 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr) { - if (txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + if (txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. 
+ */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); iwl_trans_pcie_ref(trans); } -- cgit v0.10.2 From 646c4b75494747887f936513b669bb8a2d794459 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Thu, 30 Jul 2015 15:51:32 +0800 Subject: x86/irq: Use the caller provided polarity setting in mp_check_pin_attr() Commit d32932d02e18 ("x86/irq: Convert IOAPIC to use hierarchical irqdomain interfaces") introduced a regression which causes malfunction of interrupt lines. The reason is that the conversion of mp_check_pin_attr() missed to update the polarity selection of the interrupt pin with the caller provided setting and instead uses a stale attribute value. That in turn results in chosing the wrong interrupt flow handler. Use the caller supplied setting to configure the pin correctly which also choses the correct interrupt flow handler. This restores the original behaviour and on the affected machine/driver (Surface Pro 3, i2c controller) all IOAPIC IRQ configuration are identical to v4.1. Fixes: d32932d02e18 ("x86/irq: Convert IOAPIC to use hierarchical irqdomain interfaces") Reported-and-tested-by: Matt Fleming Reported-and-tested-by: Chen Yu Signed-off-by: Jiang Liu Cc: Tony Luck Cc: Chen Yu Cc: Yinghai Lu Link: http://lkml.kernel.org/r/1438242695-23531-1-git-send-email-jiang.liu@linux.intel.com Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 845dc0d..206052e 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) */ if (irq < nr_legacy_irqs() && data->count == 1) { if (info->ioapic_trigger != data->trigger) - mp_register_handler(irq, data->trigger); + mp_register_handler(irq, info->ioapic_trigger); data->entry.trigger = data->trigger = info->ioapic_trigger; data->entry.polarity = data->polarity = info->ioapic_polarity; } -- cgit v0.10.2 From 54d9ffc416022cf82649fe5665875d8962dfac41 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Jul 2015 12:38:06 +0200 Subject: MAINTAINERS: Appoint Marc Zyngier as irqchips co-maintainer Signed-off-by: Thomas Gleixner Acked-by: Jason Cooper Cc: Marc Zyngier diff --git a/MAINTAINERS b/MAINTAINERS index 9289ecb..577be68 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5600,6 +5600,7 @@ F: kernel/irq/ IRQCHIP DRIVERS M: Thomas Gleixner M: Jason Cooper +M: Marc Zyngier L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core -- cgit v0.10.2 From b09dec2c77dc0ef0bb43ade1628ab4b390e74c6d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Jul 2015 12:40:55 +0200 Subject: MAINTAINERS: Appoint Jiang and Marc as irqdomain maintainers Ben was pretty surprised that he is still listed as the maintainer and he has no objections against transferring the duty to those who rumaged in and revamped that code in the recent past. Add kernel/irq/msi.c to the affected files as it's part of the shiny new hierarchical irqdomain machinery. 
Signed-off-by: Thomas Gleixner Acked-by: Benjamin Herrenschmidt Cc: Jiang Liu Cc: Marc Zyngier Cc: Grant Likely diff --git a/MAINTAINERS b/MAINTAINERS index 577be68..a9ae6c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5609,11 +5609,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/ F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -M: Benjamin Herrenschmidt +M: Jiang Liu +M: Marc Zyngier S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: Documentation/IRQ-domain.txt F: include/linux/irqdomain.h F: kernel/irq/irqdomain.c +F: kernel/irq/msi.c ISAPNP M: Jaroslav Kysela -- cgit v0.10.2 From be052cc87745e01846fb036eb81567c784439078 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 7 Jul 2015 16:06:15 +0300 Subject: extcon: Fix hang and extcon_get/set_cable_state(). Users of find_cable_index_by_name() will cause a kernel hang as the while loop counter is never incremented and end condition is never reached. extcon_get_cable_state() and extcon_set_cable_state() are broken because they use cable index instead of cable id. This causes the first cable state (cable.0) to be always invalid in sysfs or extcon_get_cable_state() users. Introduce a new function find_cable_id_by_name() that fixes both of the above issues. Fixes: commit 73b6ecdb93e8 ("extcon: Redefine the unique id of supported external connectors without 'enum extcon' type") Cc: Greg Kroah-Hartman Signed-off-by: Roger Quadros Tested-by: Ivan T. Ivanov [cw00.choi: Fix minor coding style] Signed-off-by: Chanwoo Choi diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 76157ab..0e2af17 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id return -EINVAL; } -static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +static int find_cable_id_by_name(struct extcon_dev *edev, const char *name) { - unsigned int id = EXTCON_NONE; + unsigned int id = -EINVAL; int i = 0; - if (edev->max_supported == 0) - return -EINVAL; - - /* Find the the number of extcon cable */ + /* Find the id of extcon cable */ while (extcon_name[i]) { if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { id = i; break; } + i++; } - if (id == EXTCON_NONE) + return id; +} + +static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +{ + unsigned int id; + + if (edev->max_supported == 0) return -EINVAL; + /* Find the the number of extcon cable */ + id = find_cable_id_by_name(edev, name); + if (id < 0) + return id; + return find_cable_index_by_id(edev, id); } @@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev, struct extcon_cable *cable = container_of(attr, struct extcon_cable, attr_state); + int i = cable->cable_index; + return sprintf(buf, "%d\n", extcon_get_cable_state_(cable->edev, - cable->cable_index)); + cable->edev->supported_cable[i])); } /** @@ -361,8 +373,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_); */ int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) { - return extcon_get_cable_state_(edev, find_cable_index_by_name - (edev, cable_name)); + unsigned int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_get_cable_state_(edev, id); } EXPORT_SYMBOL_GPL(extcon_get_cable_state); @@ -404,8 +421,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_); int extcon_set_cable_state(struct extcon_dev *edev, const char *cable_name, bool 
cable_state) { - return extcon_set_cable_state_(edev, find_cable_index_by_name - (edev, cable_name), cable_state); + unsigned int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_set_cable_state_(edev, id, cable_state); } EXPORT_SYMBOL_GPL(extcon_set_cable_state); -- cgit v0.10.2 From f7a898117aebf3d52370fe637f4d7aff7a237afc Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Mon, 6 Jul 2015 17:46:58 +0300 Subject: extcon: Fix extcon_cable_get_state() from getting old state after notification Currently the extcon code notifiers the interested listeners before it updates the extcon state with the new state. This will cause the listeners that use extcon_cable_get_state() to get the stale state and loose the new state. Fix this by first changing the extcon state variable and then notifying listeners. Signed-off-by: Roger Quadros Tested-by: Ivan T. Ivanov Signed-off-by: Chanwoo Choi diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 0e2af17..43b57b0 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -275,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state) spin_lock_irqsave(&edev->lock, flags); if (edev->state != ((edev->state & ~mask) | (state & mask))) { + u32 old_state; + if (check_mutually_exclusive(edev, (edev->state & ~mask) | (state & mask))) { spin_unlock_irqrestore(&edev->lock, flags); return -EPERM; } - for (index = 0; index < edev->max_supported; index++) { - if (is_extcon_changed(edev->state, state, index, &attached)) - raw_notifier_call_chain(&edev->nh[index], attached, edev); - } - + old_state = edev->state; edev->state &= ~mask; edev->state |= state & mask; + for (index = 0; index < edev->max_supported; index++) { + if (is_extcon_changed(old_state, edev->state, index, + &attached)) + raw_notifier_call_chain(&edev->nh[index], + attached, edev); + } + /* This could be in interrupt handler */ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); if (prop_buf) { -- cgit v0.10.2 From 5d5cd85ff441534a52f23f821d0a7c644d3b6cce Mon Sep 17 00:00:00 2001 From: Mike Looijmans Date: Tue, 28 Jul 2015 07:51:01 +0200 Subject: rsi: Fix failure to load firmware after memory leak fix and fix the leak Fixes commit eae79b4f3e82 ("rsi: fix memory leak in rsi_load_ta_instructions()") which stopped the driver from functioning. Firmware data has been allocated using vmalloc(), resulting in memory that cannot be used for DMA. Hence the firmware was first copied to a buffer allocated with kmalloc() in the original code. This patch reverts the commit and only calls "kfree()" to release the buffer after sending the data. This fixes the memory leak without breaking the driver. Add a comment to the kmemdup() calls to explain why this is done, and abort if memory allocation fails. Tested on a Topic Miami-Florida board which contains the rsi SDIO chip. Also added the same kfree() call to the USB glue driver. This was not tested on actual hardware though, as I only have the SDIO version. 
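For readers who only skim the log, the shape of the fix condenses to the sketch below. It is illustrative only: the function and variable names are invented, error handling is trimmed, and the real driver's rsi_copy_to_card() call is reduced to a comment. It shows the pattern the patch restores, namely that data returned by request_firmware() may sit in vmalloc()ed memory that cannot be used for DMA, so it is duplicated into kmalloc()ed memory with kmemdup() and the copy is freed once the transfer is done.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/slab.h>

static int example_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw_entry;
	u8 *fw;
	int ret;

	ret = request_firmware(&fw_entry, name, dev);
	if (ret)
		return ret;

	/* Bounce buffer: kmalloc()ed memory can be DMA-mapped, vmalloc()ed cannot. */
	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
	if (!fw) {
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	/* ... hand 'fw' and fw_entry->size to the card / DMA engine here ... */

	kfree(fw);                      /* release the bounce buffer, no leak */
	release_firmware(fw_entry);
	return 0;
}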
Fixes: eae79b4f3e82 ("rsi: fix memory leak in rsi_load_ta_instructions()") Signed-off-by: Mike Looijmans Cc: stable@vger.kernel.org Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b6cc9ff..1c6788a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 len; u32 num_blocks; + const u8 *fw; const struct firmware *fw_entry = NULL; u32 block_size = dev->tx_blk_size; int status = 0; @@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ + fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); - status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); + status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 1106ce7..30c2cf7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } -- cgit v0.10.2 From 098697dbad9070249eb07a0241c4001aa367bb89 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Wed, 29 Jul 2015 23:36:30 +0200 Subject: b43: fix extpa_gain check for 2GHz On the 2GHz and and on the 5GHZ band only the extpa_gain setting from the 5GHz band was checked. this patch makes it check the property from the correct band. Signed-off-by: Hauke Mehrtens Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 25d1cbd..b2f0d24 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) switch (phy->rev) { case 6: case 5: - if (sprom->fem.ghz5.extpa_gain == 3) + if (sprom->fem.ghz2.extpa_gain == 3) return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; /* fall through */ case 4: -- cgit v0.10.2 From 7c62940165e9ae4004ce4e6b5117330bab94df68 Mon Sep 17 00:00:00 2001 From: Luis Felipe Dominguez Vega Date: Wed, 29 Jul 2015 21:11:20 -0500 Subject: rtlwifi: Fix NULL dereference when PCI driver used as an AP In commit 33511b157bbcebaef853cc1811992b664a2e5862 ("rtlwifi: add support to send beacon frame"), the mechanism for sending beacons was established. 
That patch works correctly for rtl8192cu, but there is a possibility of getting the following warnings in the PCI drivers: WARNING: CPU: 1 PID: 2439 at net/mac80211/driver-ops.h:12 ieee80211_bss_info_change_notify+0x179/0x1d0 [mac80211]() wlp5s0: Failed check-sdata-in-driver check, flags: 0x0 The warning is followed by a NULL pointer dereference as follows: BUG: unable to handle kernel NULL pointer dereference at 0000000000000006 IP: [] rtl_get_tcb_desc+0x5e/0x760 [rtlwifi] This problem was reported at http://thread.gmane.org/gmane.linux.kernel.wireless.general/138645, but no solution was found at that time. The problem was also reported at https://bugzilla.kernel.org/show_bug.cgi?id=9744 and this solution was developed and tested there. The USB driver works with a NULL final argument in the adapter_tx() callback; however, the PCI drivers need a struct rtl_tcb_desc in that position. Fixes: 33511b157bbc ("rtlwifi: add support to send beacon frame.") Signed-off-by: Luis Felipe Dominguez Vega Signed-off-by: Larry Finger Cc: Stable [3.19+] Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 3b3a88b..585d088 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + struct rtl_tcb_desc tcb_desc; - if (skb) - rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); + if (skb) { + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); + } } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, -- cgit v0.10.2 From aa1acff356bbedfd03b544051f5b371746735d89 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 30 Jul 2015 14:31:31 -0700 Subject: x86/xen: Probe target addresses in set_aliased_prot() before the hypercall The update_va_mapping hypercall can fail if the VA isn't present in the guest's page tables. Under certain loads, this can result in an OOPS when the target address is in unpopulated vmap space. While we're at it, add comments to help explain what's going on. This isn't a great long-term fix. This code should probably be changed to use something like set_memory_ro. Signed-off-by: Andy Lutomirski Cc: Andrew Cooper Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Vrabel Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jan Beulich Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt Cc: Thomas Gleixner Cc: security@kernel.org Cc: Cc: xen-devel Link: http://lkml.kernel.org/r/0b0e55b995cda11e7829f140b833ef932fcabe3a.1438291540.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0b95c9b..11d6fb4 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte_t pte; unsigned long pfn; struct page *page; + unsigned char dummy; ptep = lookup_address((unsigned long)v, &level); BUG_ON(ptep == NULL); @@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte = pfn_pte(pfn, prot); + /* + * Careful: update_va_mapping() will fail if the virtual address + * we're poking isn't populated in the page tables. 
We don't + * need to worry about the direct map (that's always in the page + * tables), but we need to be careful about vmap space. In + * particular, the top level page table can lazily propagate + * entries between processes, so if we've switched mms since we + * vmapped the target in the first place, we might not have the + * top-level page table entry populated. + * + * We disable preemption because we want the same mm active when + * we probe the target and when we issue the hypercall. We'll + * have the same nominal mm, but if we're a kernel thread, lazy + * mm dropping could change our pgd. + * + * Out of an abundance of caution, this uses __get_user() to fault + * in the target address just in case there's some obscure case + * in which the target address isn't readable. + */ + + preempt_disable(); + + pagefault_disable(); /* Avoid warnings due to being atomic. */ + __get_user(dummy, (unsigned char __user __force *)v); + pagefault_enable(); + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); @@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot) BUG(); } else kmap_flush_unused(); + + preempt_enable(); } static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) @@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; int i; + /* + * We need to mark the all aliases of the LDT pages RO. We + * don't need to call vm_flush_aliases(), though, since that's + * only responsible for flushing aliases out the TLBs, not the + * page tables, and Xen will flush the TLB for us if needed. + * + * To avoid confusing future readers: none of this is necessary + * to load the LDT. The hypervisor only checks this when the + * LDT is faulted in due to subsequent descriptor access. + */ + for(i = 0; i < entries; i += entries_per_page) set_aliased_prot(ldt + i, PAGE_KERNEL_RO); } -- cgit v0.10.2 From 37868fe113ff2ba814b3b4eb12df214df555f8dc Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 30 Jul 2015 14:31:32 -0700 Subject: x86/ldt: Make modify_ldt synchronous modify_ldt() has questionable locking and does not synchronize threads. Improve it: redesign the locking and synchronize all threads' LDTs using an IPI on all modifications. This will dramatically slow down modify_ldt in multithreaded programs, but there shouldn't be any multithreaded programs that care about modify_ldt's performance in the first place. This fixes some fallout from the CVE-2015-5157 fixes. Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Andrew Cooper Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Jan Beulich Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt Cc: Thomas Gleixner Cc: security@kernel.org Cc: Cc: xen-devel Link: http://lkml.kernel.org/r/4c6978476782160600471bd865b318db34c7b628.1438291540.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index a0bf89f..4e10d73 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -280,21 +280,6 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 09b9620..364d274 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,8 +9,7 @@ * we put the segment information here. */ typedef struct { - void *ldt; - int size; + struct ldt_struct *ldt; #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 804a3a6..984abfe 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {} #endif /* + * ldt_structs can be allocated, used, and freed, but they are never + * modified while live. + */ +struct ldt_struct { + /* + * Xen requires page-aligned LDTs with special permissions. This is + * needed to prevent us from installing evil descriptors such as + * call gates. On native, we could merge the ldt_struct and LDT + * allocations, but it's not worth trying to optimize. + */ + struct desc_struct *entries; + int size; +}; + +static inline void load_mm_ldt(struct mm_struct *mm) +{ + struct ldt_struct *ldt; + + /* lockless_dereference synchronizes with smp_store_release */ + ldt = lockless_dereference(mm->context.ldt); + + /* + * Any change to mm->context.ldt is followed by an IPI to all + * CPUs with the mm active. The LDT will not be freed until + * after the IPI is handled by all such CPUs. This means that, + * if the ldt_struct changes before we return, the values we see + * will be safe, and the new values will be loaded before we run + * any user code. + * + * NB: don't try to convert this to use RCU without extreme care. + * We would still need IRQs off, because we don't want to change + * the local LDT after an IPI loaded a newer value than the one + * that we can see. + */ + + if (unlikely(ldt)) + set_ldt(ldt->entries, ldt->size); + else + clear_LDT(); + + DEBUG_LOCKS_WARN_ON(preemptible()); +} + +/* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); @@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * was called and then modify_ldt changed * prev->context.ldt but suppressed an IPI to this CPU. * In this case, prev->context.ldt != NULL, because we - * never free an LDT while the mm still exists. That - * means that next->context.ldt != prev->context.ldt, - * because mms never share an LDT. + * never set context.ldt to NULL while the mm still + * exists. 
That means that next->context.ldt != + * prev->context.ldt, because mms never share an LDT. */ if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); + load_mm_ldt(next); } #ifdef CONFIG_SMP else { @@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); load_mm_cr4(next); - load_LDT_nolock(&next->context); + load_mm_ldt(next); } } #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 922c5e0..cb9e5df 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1410,7 +1410,7 @@ void cpu_init(void) load_sp0(t, ¤t->thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); @@ -1459,7 +1459,7 @@ void cpu_init(void) load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3658de4..9469dfa 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + struct ldt_struct *ldt; + if (idx > LDT_ENTRIES) return 0; - if (idx > current->active_mm->context.size) + /* IRQs are off, so this synchronizes with smp_store_release */ + ldt = lockless_dereference(current->active_mm->context.ldt); + if (!ldt || idx > ldt->size) return 0; - desc = current->active_mm->context.ldt; + desc = &ldt->entries[idx]; } else { if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt); + desc = raw_cpu_ptr(gdt_page.gdt) + idx; } - return get_desc_base(desc + idx); + return get_desc_base(desc); } #ifdef CONFIG_COMPAT diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d..2bcc052 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -20,82 +21,82 @@ #include #include -#ifdef CONFIG_SMP +/* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { - if (current->active_mm == current_mm) - load_LDT(¤t->active_mm->context); + mm_context_t *pc; + + if (current->active_mm != current_mm) + return; + + pc = ¤t->active_mm->context; + set_ldt(pc->ldt->entries, pc->ldt->size); } -#endif -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ +static struct ldt_struct *alloc_ldt_struct(int size) { - void *oldldt, *newldt; - int oldsize; - - if (mincount <= pc->size) - return 0; - oldsize = pc->size; - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); + struct ldt_struct *new_ldt; + int alloc_size; + + if (size > LDT_ENTRIES) + return NULL; + + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + if (!new_ldt) + return NULL; + + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); + alloc_size = size * LDT_ENTRY_SIZE; + + /* + * Xen is very picky: it requires a page-aligned LDT that has no + * trailing nonzero bytes in any page that contains LDT descriptors. 
+ * Keep it simple: zero the whole allocation and never allocate less + * than PAGE_SIZE. + */ + if (alloc_size > PAGE_SIZE) + new_ldt->entries = vzalloc(alloc_size); else - newldt = (void *)__get_free_page(GFP_KERNEL); - - if (!newldt) - return -ENOMEM; + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (oldsize) - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, - (mincount - oldsize) * LDT_ENTRY_SIZE); + if (!new_ldt->entries) { + kfree(new_ldt); + return NULL; + } - paravirt_alloc_ldt(newldt, mincount); + new_ldt->size = size; + return new_ldt; +} -#ifdef CONFIG_X86_64 - /* CHECKME: Do we really need this ? */ - wmb(); -#endif - pc->ldt = newldt; - wmb(); - pc->size = mincount; - wmb(); - - if (reload) { -#ifdef CONFIG_SMP - preempt_disable(); - load_LDT(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); -#else - load_LDT(pc); -#endif - } - if (oldsize) { - paravirt_free_ldt(oldldt, oldsize); - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - put_page(virt_to_page(oldldt)); - } - return 0; +/* After calling this, the LDT is immutable. */ +static void finalize_ldt_struct(struct ldt_struct *ldt) +{ + paravirt_alloc_ldt(ldt->entries, ldt->size); } -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +/* context.lock is held */ +static void install_ldt(struct mm_struct *current_mm, + struct ldt_struct *ldt) { - int err = alloc_ldt(new, old->size, 0); - int i; + /* Synchronizes with lockless_dereference in load_mm_ldt. */ + smp_store_release(¤t_mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using current_mm. */ + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); +} - if (err < 0) - return err; +static void free_ldt_struct(struct ldt_struct *ldt) +{ + if (likely(!ldt)) + return; - for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); - return 0; + paravirt_free_ldt(ldt->entries, ldt->size); + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(ldt->entries); + else + kfree(ldt->entries); + kfree(ldt); } /* @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + struct ldt_struct *new_ldt; struct mm_struct *old_mm; int retval = 0; mutex_init(&mm->context.lock); - mm->context.size = 0; old_mm = current->mm; - if (old_mm && old_mm->context.size > 0) { - mutex_lock(&old_mm->context.lock); - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); + if (!old_mm) { + mm->context.ldt = NULL; + return 0; } + + mutex_lock(&old_mm->context.lock); + if (!old_mm->context.ldt) { + mm->context.ldt = NULL; + goto out_unlock; + } + + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); + if (!new_ldt) { + retval = -ENOMEM; + goto out_unlock; + } + + memcpy(new_ldt->entries, old_mm->context.ldt->entries, + new_ldt->size * LDT_ENTRY_SIZE); + finalize_ldt_struct(new_ldt); + + mm->context.ldt = new_ldt; + +out_unlock: + mutex_unlock(&old_mm->context.lock); return retval; } @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { -#ifdef CONFIG_X86_32 - /* CHECKME: Can this ever happen ? 
*/ - if (mm == current->active_mm) - clear_LDT(); -#endif - paravirt_free_ldt(mm->context.ldt, mm->context.size); - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - put_page(virt_to_page(mm->context.ldt)); - mm->context.size = 0; - } + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; } static int read_ldt(void __user *ptr, unsigned long bytecount) { - int err; + int retval; unsigned long size; struct mm_struct *mm = current->mm; - if (!mm->context.size) - return 0; + mutex_lock(&mm->context.lock); + + if (!mm->context.ldt) { + retval = 0; + goto out_unlock; + } + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; - mutex_lock(&mm->context.lock); - size = mm->context.size * LDT_ENTRY_SIZE; + size = mm->context.ldt->size * LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; - mutex_unlock(&mm->context.lock); - if (err < 0) - goto error_return; + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { + retval = -EFAULT; + goto out_unlock; + } + if (size != bytecount) { - /* zero-fill the rest */ - if (clear_user(ptr + size, bytecount - size) != 0) { - err = -EFAULT; - goto error_return; + /* Zero-fill the rest and pretend we read bytecount bytes. */ + if (clear_user(ptr + size, bytecount - size)) { + retval = -EFAULT; + goto out_unlock; } } - return bytecount; -error_return: - return err; + retval = bytecount; + +out_unlock: + mutex_unlock(&mm->context.lock); + return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) struct desc_struct ldt; int error; struct user_desc ldt_info; + int oldsize, newsize; + struct ldt_struct *new_ldt, *old_ldt; error = -EINVAL; if (bytecount != sizeof(ldt_info)) @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) goto out; } - mutex_lock(&mm->context.lock); - if (ldt_info.entry_number >= mm->context.size) { - error = alloc_ldt(¤t->mm->context, - ldt_info.entry_number + 1, 1); - if (error < 0) - goto out_unlock; - } - - /* Allow LDTs to be cleared by the user. */ - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { - if (oldmode || LDT_empty(&ldt_info)) { - memset(&ldt, 0, sizeof(ldt)); - goto install; + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || + LDT_empty(&ldt_info)) { + /* The user wants to clear the entry. */ + memset(&ldt, 0, sizeof(ldt)); + } else { + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out; } + + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; } - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { - error = -EINVAL; + mutex_lock(&mm->context.lock); + + old_ldt = mm->context.ldt; + oldsize = old_ldt ? old_ldt->size : 0; + newsize = max((int)(ldt_info.entry_number + 1), oldsize); + + error = -ENOMEM; + new_ldt = alloc_ldt_struct(newsize); + if (!new_ldt) goto out_unlock; - } - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + if (old_ldt) + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); + new_ldt->entries[ldt_info.entry_number] = ldt; + finalize_ldt_struct(new_ldt); - /* Install the new entry ... 
*/ -install: - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); + install_ldt(mm, new_ldt); + free_ldt_struct(old_ldt); error = 0; out_unlock: diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71d7849..f6b9163 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - if (dead_task->mm->context.size) { + if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, - dead_task->mm->context.size); + dead_task->mm->context.ldt->size); BUG(); } } diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 9b4d51d..6273324 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -5,6 +5,7 @@ #include #include #include +#include unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { @@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re seg &= ~7UL; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(!child->mm->context.ldt || + (seg >> 3) >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { - desc = child->mm->context.ldt + seg; + desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 0d7dd1f..9ab5279 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -22,6 +22,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -153,7 +154,7 @@ static void fix_processor_context(void) syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ - load_LDT(¤t->active_mm->context); /* This does lldt */ + load_mm_ldt(current->active_mm); /* This does lldt */ fpu__resume_cpu(); } -- cgit v0.10.2 From f6762cb2ca48e9052b5233c338fa254fa58d8981 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 7 Jul 2015 16:18:46 +0800 Subject: ceph: fix ceph_encode_locks_to_buffer() posix locks should be in ctx->flc_posix list Signed-off-by: Yan, Zheng Signed-off-by: Ilya Dryomov diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 4347039..6706bde 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode, return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(lock, &ctx->flc_flock, fl_list) { + list_for_each_entry(lock, &ctx->flc_posix, fl_list) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; -- cgit v0.10.2 From fc927cd32feca2acefd90a4ac317fa4f0a2e5955 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 20 Jul 2015 09:50:58 +0800 Subject: ceph: always re-send cap flushes when MDS recovers commit e548e9b93d3e565e42b938a99804114565be1f81 makes the kclient only re-send cap flush once during MDS failover. If the kclient sends a cap flush after MDS enters reconnect stage but before MDS recovers. The kclient will skip re-sending the same cap flush when MDS recovers. This causes problem for newly created inode. The MDS handles cap flushes before replaying unsafe requests, so it's possible that MDS find corresponding inode is missing when handling cap flush. 
The fix is reverting to old behaviour: always re-send when MDS recovers Signed-off-by: Yan, Zheng Signed-off-by: Ilya Dryomov diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index dc10c9d..ddd5e94 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode, swap(cf, ci->i_prealloc_cap_flush); cf->caps = flushing; - cf->kick = false; spin_lock(&mdsc->cap_dirty_lock); list_del_init(&ci->i_dirty_item); @@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, static int __kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, - struct ceph_inode_info *ci, - bool kick_all) + struct ceph_inode_info *ci) { struct inode *inode = &ci->vfs_inode; struct ceph_cap *cap; @@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc, for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { cf = rb_entry(n, struct ceph_cap_flush, i_node); - if (cf->tid < first_tid) - continue; - if (kick_all || cf->kick) + if (cf->tid >= first_tid) break; } if (!n) { @@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc, } cf = rb_entry(n, struct ceph_cap_flush, i_node); - cf->kick = false; first_tid = cf->tid + 1; @@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, { struct ceph_inode_info *ci; struct ceph_cap *cap; - struct ceph_cap_flush *cf; - struct rb_node *n; dout("early_kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { @@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, if ((cap->issued & ci->i_flushing_caps) != ci->i_flushing_caps) { spin_unlock(&ci->i_ceph_lock); - if (!__kick_flushing_caps(mdsc, session, ci, true)) + if (!__kick_flushing_caps(mdsc, session, ci)) continue; spin_lock(&ci->i_ceph_lock); } - for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { - cf = rb_entry(n, struct ceph_cap_flush, i_node); - cf->kick = true; - } - spin_unlock(&ci->i_ceph_lock); } } @@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, dout("kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { - int delayed = __kick_flushing_caps(mdsc, session, ci, false); + int delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); @@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, spin_unlock(&ci->i_ceph_lock); - delayed = __kick_flushing_caps(mdsc, session, ci, true); + delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 860cc016..2f2460d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) struct ceph_cap_flush { u64 tid; int caps; - bool kick; struct rb_node g_node; // global union { struct rb_node i_node; // inode -- cgit v0.10.2 From 2761713d35e370fd640b5781109f753066b746c4 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 16 Jul 2015 17:36:11 +0300 Subject: rbd: fix copyup completion race For write/discard obj_requests that involved a copyup method call, the opcode of the first op is CEPH_OSD_OP_CALL and the ->callback is rbd_img_obj_copyup_callback(). 
The latter frees copyup pages, sets ->xferred and delegates to rbd_img_obj_callback(), the "normal" image object callback, for reporting to block layer and putting refs. rbd_osd_req_callback() however treats CEPH_OSD_OP_CALL as a trivial op, which means obj_request is marked done in rbd_osd_trivial_callback(), *before* ->callback is invoked and rbd_img_obj_copyup_callback() has a chance to run. Marking obj_request done essentially means giving rbd_img_obj_callback() a license to end it at any moment, so if another obj_request from the same img_request is being completed concurrently, rbd_img_obj_end_request() may very well be called on such prematurally marked done request: handle_reply() rbd_osd_req_callback() rbd_osd_trivial_callback() rbd_obj_request_complete() rbd_img_obj_copyup_callback() rbd_img_obj_callback() handle_reply() rbd_osd_req_callback() rbd_osd_trivial_callback() for_each_obj_request(obj_request->img_request) { rbd_img_obj_end_request(obj_request-1/2) rbd_img_obj_end_request(obj_request-2/2) <-- } Calling rbd_img_obj_end_request() on such a request leads to trouble, in particular because its ->xfferred is 0. We report 0 to the block layer with blk_update_request(), get back 1 for "this request has more data in flight" and then trip on rbd_assert(more ^ (which == img_request->obj_request_count)); with rhs (which == ...) being 1 because rbd_img_obj_end_request() has been called for both requests and lhs (more) being 1 because we haven't got a chance to set ->xfferred in rbd_img_obj_copyup_callback() yet. To fix this, leverage that rbd wants to call class methods in only two cases: one is a generic method call wrapper (obj_request is standalone) and the other is a copyup (obj_request is part of an img_request). So make a dedicated handler for CEPH_OSD_OP_CALL and directly invoke rbd_img_obj_copyup_callback() from it if obj_request is part of an img_request, similar to how CEPH_OSD_OP_READ handler invokes rbd_img_obj_request_read_callback(). Since rbd_img_obj_copyup_callback() is now being called from the OSD request callback (only), it is renamed to rbd_osd_copyup_callback(). Cc: Alex Elder Cc: stable@vger.kernel.org # 3.10+, needs backporting for < 3.18 Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d94529d..bc67a93 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
# define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ +static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); @@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) obj_request_done_set(obj_request); } +static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) +{ + dout("%s: obj %p\n", __func__, obj_request); + + if (obj_request_img_data_test(obj_request)) + rbd_osd_copyup_callback(obj_request); + else + obj_request_done_set(obj_request); +} + static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, struct ceph_msg *msg) { @@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_discard_callback(obj_request); break; case CEPH_OSD_OP_CALL: + rbd_osd_call_callback(obj_request); + break; case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: rbd_osd_trivial_callback(obj_request); @@ -2530,13 +2543,15 @@ out_unwind: } static void -rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) +rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct rbd_device *rbd_dev; struct page **pages; u32 page_count; + dout("%s: obj %p\n", __func__, obj_request); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO || obj_request->type == OBJ_REQUEST_NODATA); rbd_assert(obj_request_img_data_test(obj_request)); @@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) if (!obj_request->result) obj_request->xferred = obj_request->length; - /* Finish up with the normal image object callback */ - - rbd_img_obj_callback(obj_request); + obj_request_done_set(obj_request); } static void @@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) /* All set, send it off. */ - orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; img_result = rbd_obj_request_submit(osdc, orig_request); if (!img_result) -- cgit v0.10.2 From 1f023297f7f77d434ecc221018d2e181eac0ae36 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Mon, 27 Jul 2015 00:16:31 +0300 Subject: i2c: slave eeprom: clean up sysfs bin attribute read()/write() The change removes redundant sysfs binary file boundary checks, since this task is already done on caller side in fs/sysfs/file.c Note, on file size overflow read() now returns 0, and this is a correct and expected EOF notification according to POSIX. 
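To make the rationale concrete, here is a minimal sketch of what a binary-attribute read callback can look like once the redundant check is gone. The structure and function names are invented for illustration and the real driver's locking is omitted; the point is that fs/sysfs/file.c has already clamped 'off' and 'count' against attr->size, and itself returns 0 (EOF) for reads starting at or past the end, before this callback ever runs.

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/sysfs.h>

struct example_eeprom {
	u8 buffer[256];
};

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	struct example_eeprom *ee =
		dev_get_drvdata(container_of(kobj, struct device, kobj));

	/* off and count are already bounded to attr->size by sysfs. */
	memcpy(buf, &ee->buffer[off], count);

	return count;
}

The same reasoning applies to the write() callback here and to the at24 change that follows.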
Signed-off-by: Vladimir Zapolskiy Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 8223746..1da4496 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); @@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); -- cgit v0.10.2 From d12c0aaf3780c5b26b4ea9e795252381f586c063 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Mon, 27 Jul 2015 00:18:51 +0300 Subject: misc: eeprom: at24: clean up at24_bin_write() The change removes redundant sysfs binary file boundary check, since this task is already done on caller side in fs/sysfs/file.c Signed-off-by: Vladimir Zapolskiy Signed-off-by: Wolfram Sang diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2d3db81..6ded3dc 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, { struct at24_data *at24; - if (unlikely(off >= attr->size)) - return -EFBIG; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); return at24_write(at24, buf, off, count); } -- cgit v0.10.2 From 8b06260836ab47abbb5ea49d889660a0ed8adf90 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 8 Jul 2015 16:35:06 +0200 Subject: i2c: core: only use set_scl for bus recovery after calling prepare_recovery Using set_scl may be ineffective before calling the driver specific prepare_recovery callback, which might change into a test mode. So instead of setting SCL in i2c_generic_scl_recovery, move it to i2c_generic_recovery (after the optional prepare_recovery). Signed-off-by: Jan Luebbe Acked-by: Alexander Sverdlin Tested-by: Alexander Sverdlin Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index e6d4935..8b64cf3 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + bri->set_scl(adap, val); + ndelay(RECOVERY_NDELAY); + /* * By this time SCL is high, as we need to give 9 falling-rising edges */ @@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { - adap->bus_recovery_info->set_scl(adap, 1); return i2c_generic_recovery(adap); } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); -- cgit v0.10.2 From 828e66c0edf97bcb79b59b1ddd803a50629b3937 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 8 Jul 2015 16:35:27 +0200 Subject: i2c: omap: fix bus recovery setup At least on the AM335x, enabling OMAP_I2C_SYSTEST_ST_EN is not enough to allow direct access to the SCL and SDA pins. In addition to ST_EN, we need to set the TMODE to 0b11 (Loop back & SDA/SCL IO mode select). 
Also, as the reset values of SCL_O and SDA_O are 0 (which means "drive low level"), we need to set them to 1 (which means "high-impedance") to avoid unwanted changes on the pins. As a precaution, reset all these bits to their default values after recovery is complete. Signed-off-by: Jan Luebbe Tested-by: Alexander Sverdlin Reviewed-by: Grygorii Strashko Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index d1c22e3..fc9bf7f 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* enable test mode */ reg |= OMAP_I2C_SYSTEST_ST_EN; + /* select SDA/SCL IO mode */ + reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT; + /* set SCL to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SCL_O; + /* set SDA to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } @@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* restore reset values */ reg &= ~OMAP_I2C_SYSTEST_ST_EN; + reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK; + reg &= ~OMAP_I2C_SYSTEST_SCL_O; + reg &= ~OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } -- cgit v0.10.2 From e952849a02524a247967bf7105f36fbb13cd00c2 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 28 Jul 2015 20:11:23 +0900 Subject: i2c: Fix typo in i2c-bfin-twi.c This patch fix some typos found in a printk message and MODULE_DESCRIPTION. Signed-off-by: Masanari Iida Acked-by: Sonic Zhang Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index af162b4..025686d 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iface); - dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " + dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, " "regs_base@%p\n", iface->regs_base); return 0; @@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); -MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); +MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-bfin-twi"); -- cgit v0.10.2 From 20cadcb4df3eebd82410eab4aa91d5b083e16cd1 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Wed, 17 Jun 2015 16:22:26 +0200 Subject: dmaengine: at_xdmac: fix bug about channel configuration When using descriptor view 2 or higher, we don't write the configuration into AT_XDMAC_CC register because this configuration will be fetch from the descriptor. Unfortunately, the PROT bit is not updated with this method, we have to do it manually before enabling the channel. Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213d..52ca1cc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. 
*/ - if (at_xdmac_chan_is_cyclic(atchan)) { + if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); - } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - } else { - /* - * No need to write AT_XDMAC_CC reg, it will be done when the - * descriptor is fecthed. - */ + else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; - } + /* + * Even if the register will be updated from the configuration in the + * descriptor when using view 2 or higher, the PROT bit won't be set + * properly. This bit can be modified only by using the channel + * configuration register. + */ + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); reg |= AT_XDMAC_CNDC_NDDUP | AT_XDMAC_CNDC_NDSUP -- cgit v0.10.2 From 93dce3a6434f632dbd684ec1d9c3dc66a14e27e1 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 18 Jun 2015 13:25:41 +0200 Subject: dmaengine: at_hdmac: fix residue computation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As claimed by the programmer datasheet and confirmed by the IP designer, the Block Transfer Size (BTSIZE) bitfield of the Channel x Control A Register (CTRLAx) always refers to a number of Source Width (SRC_WIDTH) transfers. Both the SRC_WIDTH and BTSIZE bitfields can be extacted from the CTRLAx register to compute the DMA residue. So the 'tx_width' field is useless and can be removed from the struct at_desc. Before this patch, atc_prep_slave_sg() was not consistent: BTSIZE was correctly initialized according to the SRC_WIDTH but 'tx_width' was always set to reg_width, which was incorrect for MEM_TO_DEV transfers. It led to bad DMA residue when 'tx_width' != SRC_WIDTH. Also the 'tx_width' field was mostly set only in the first and last descriptors. Depending on the kind of DMA transfer, this field remained uninitialized for intermediate descriptors. The accurate DMA residue was computed only when the currently processed descriptor was the first or the last of the chain. This algorithm was a little bit odd. An accurate DMA residue can always be computed using the SRC_WIDTH and BTSIZE bitfields in the CTRLAx register. Finally, the test to check whether the currently processed descriptor is the last of the chain was wrong: for cyclic transfer, last_desc->lli.dscr is NOT equal to zero, since set_desc_eol() is never called, but logically equal to first_desc->txd.phys. This bug has a side effect on the drivers/tty/serial/atmel_serial.c driver, which uses cyclic DMA transfer to receive data. Since the DMA residue was wrong each time the DMA transfer reaches the second (and last) period of the transfer, no more data were received by the USART driver till the cyclic DMA transfer loops back to the first period. Signed-off-by: Cyrille Pitchen Acked-by: Torsten Fleischer Tested-by: Jirí Prchal Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126..d3629b7 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -48,6 +48,8 @@ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ATC_MAX_DSCR_TRIALS 10 + /* * Initial number of descriptors to allocate for each channel. This could * be increased during dma usage. 
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA - * @desc: the descriptor containing the transfer width */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla, - struct at_desc *desc) +static inline int atc_calc_bytes_left(int current_len, u32 ctrla) { - return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); -} + u32 btsize = (ctrla & ATC_BTSIZE_MAX); + u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); -/** - * atc_calc_bytes_left_from_reg - calculates the number of bytes left according - * to the current value of CTRLA. - * - * @current_len: the number of bytes left before reading CTRLA - * @atchan: the channel to read CTRLA for - * @desc: the descriptor containing the transfer width - */ -static inline int atc_calc_bytes_left_from_reg(int current_len, - struct at_dma_chan *atchan, struct at_desc *desc) -{ - u32 ctrla = channel_readl(atchan, CTRLA); - - return atc_calc_bytes_left(current_len, ctrla, desc); + /* + * According to the datasheet, when reading the Control A Register + * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the + * number of transfers completed on the Source Interface. + * So btsize is always a number of source width transfers. + */ + return current_len - (btsize << src_width); } /** @@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr; + u32 ctrla, dscr, trials; /* * If the cookie doesn't match to the currently running transfer then @@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) * the channel's DSCR register and compare it against the value * of the hardware linked list structure of each child * descriptor. + * + * The CTRLA register provides us with the amount of data + * already read from the source for the current child + * descriptor. So we can compute a more accurate residue by also + * removing the number of bytes corresponding to this amount of + * data. + * + * However, the DSCR and CTRLA registers cannot be read both + * atomically. Hence a race condition may occur: the first read + * register may refer to one child descriptor whereas the second + * read may refer to a later child descriptor in the list + * because of the DMA transfer progression inbetween the two + * reads. + * + * One solution could have been to pause the DMA transfer, read + * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, + * this approach presents some drawbacks: + * - If the DMA transfer is paused, RX overruns or TX underruns + * are more likey to occur depending on the system latency. + * Taking the USART driver as an example, it uses a cyclic DMA + * transfer to read data from the Receive Holding Register + * (RHR) to avoid RX overruns since the RHR is not protected + * by any FIFO on most Atmel SoCs. So pausing the DMA transfer + * to compute the residue would break the USART driver design. + * - The atc_pause() function masks interrupts but we'd rather + * avoid to do so for system latency purpose. + * + * Then we'd rather use another solution: the DSCR is read a + * first time, the CTRLA is read in turn, next the DSCR is read + * a second time. 
If the two consecutive read values of the DSCR + * are the same then we assume both refers to the very same + * child descriptor as well as the CTRLA value read inbetween + * does. For cyclic tranfers, the assumption is that a full loop + * is "not so fast". + * If the two DSCR values are different, we read again the CTRLA + * then the DSCR till two consecutive read values from DSCR are + * equal or till the maxium trials is reach. + * This algorithm is very unlikely not to find a stable value for + * DSCR. */ - ctrla = channel_readl(atchan, CTRLA); - rmb(); /* ensure CTRLA is read before DSCR */ dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + u32 new_dscr; + + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); + + /* + * If the DSCR register value has not changed inside the + * DMA controller since the previous read, we assume + * that both the dscr and ctrla values refers to the + * very same descriptor. + */ + if (likely(new_dscr == dscr)) + break; + + /* + * DSCR has changed inside the DMA controller, so the + * previouly read value of CTRLA may refer to an already + * processed descriptor hence could be outdated. + * We need to update ctrla to match the current + * descriptor. + */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + } + if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla, desc_first); + return atc_calc_bytes_left(ret, ctrla); ret -= desc_first->len; list_for_each_entry(desc, &desc_first->tx_list, desc_node) { @@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) } /* - * For the last descriptor in the chain we can calculate + * For the current descriptor in the chain we can calculate * the remaining bytes using the channel's register. - * Note that the transfer width of the first and last - * descriptor may differ. 
*/ - if (!desc->lli.dscr) - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + ret = atc_calc_bytes_left(ret, ctrla); } else { /* single transfer */ - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); + ctrla = channel_readl(atchan, CTRLA); + ret = atc_calc_bytes_left(ret, ctrla); } return ret; @@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, desc->txd.cookie = -EBUSY; desc->total_len = desc->len = len; - desc->tx_width = dwidth; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first->txd.cookie = -EBUSY; first->total_len = len; - /* set transfer width for the calculation of the residue */ - first->tx_width = src_width; - prev->tx_width = src_width; - /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->total_len = total_len; - /* set transfer width for the calculation of the residue */ - first->tx_width = reg_width; - prev->tx_width = reg_width; - /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, desc->txd.cookie = 0; desc->len = len; - /* - * Although we only need the transfer width for the first and - * the last descriptor, its easier to set it to all descriptors. - */ - desc->tx_width = src_width; - atc_desc_chain(&first, &prev, desc); /* update the lengths and addresses for the next loop cycle */ @@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->total_len = buf_len; - first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5eb..7f5a082 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -112,6 +112,7 @@ #define ATC_SRC_WIDTH_BYTE (0x0 << 24) #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) #define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ #define ATC_DST_WIDTH(x) ((x) << 28) #define ATC_DST_WIDTH_BYTE (0x0 << 28) @@ -182,7 +183,6 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: descriptor byte count - * @tx_width: transfer width * @total_len: total transaction byte count */ struct at_desc { @@ -194,7 +194,6 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; - u32 tx_width; size_t total_len; /* Interleaved data */ -- cgit v0.10.2 From 1c8a38b1268aebc1a903b21b11575077e02d2cf7 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 30 Jun 2015 14:36:57 +0200 Subject: dmaengine: at_xdmac: fix transfer data width in at_xdmac_prep_slave_sg() This patch adds the missing update of the transfer data width in at_xdmac_prep_slave_sg(). Indeed, for each item in the scatter-gather list, we check whether the transfer length is aligned with the data width provided by dmaengine_slave_config(). If so, we directly use this data width for the current part of the transfer we are preparing. Otherwise, the data width is reduced to 8 bits (1 byte). 
Of course, the actual number of register accesses must also be updated to match the new data width. So one chunk was missing in the original patch (see Fixes tag below): the number of register accesses was correctly set to (len >> fixed_dwidth) in mbr_ubc but the real data width was not updated in mbr_cfg. Since mbr_cfg may change for each part of the scatter-gather transfer this also explains why the original patch used the Descriptor View 2 instead of the Descriptor View 1. Let's take the example of a DMA transfer to write 8bit data into an Atmel USART with FIFOs. When FIFOs are enabled in the USART, its Transmit Holding Register (THR) works in multidata mode, that is to say that up to 4 8bit data can be written into the THR in a single 32bit access and it is still possible to write only one data with a 8bit access. To take advantage of this new feature, the DMA driver was modified to allow multiple dwidths when doing slave transfers. For instance, when the total length is 22 bytes, the USART driver splits the transfer into 2 parts: First part: 20 bytes transferred through 5 32bit writes into THR Second part: 2 bytes transferred though 2 8bit writes into THR For the second part, the data width was first set to 4_BYTES by the USART driver thanks to dmaengine_slave_config() then at_xdmac_prep_slave_sg() reduces this data width to 1_BYTE because the 2 byte length is not aligned with the original 4_BYTES data width. Since the data width is modified, the actual number of writes into THR must be set accordingly. Signed-off-by: Cyrille Pitchen Fixes: 6d3a7d9e3ada ("dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers") Cc: stable@vger.kernel.org #4.0 and later Acked-by: Nicolas Ferre Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 52ca1cc..40afa2a 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -682,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->sconfig.dst_addr; } - desc->lld.mbr_cfg = atchan->cfg; - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + dwidth = at_xdmac_get_dwidth(atchan->cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + ? dwidth : AT_XDMAC_CC_DWIDTH_BYTE; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (len >> fixed_dwidth); /* microblock length */ + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | + AT_XDMAC_CC_DWIDTH(fixed_dwidth); dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); -- cgit v0.10.2 From cda8e937191c100025168ba3e22ab316c7298007 Mon Sep 17 00:00:00 2001 From: Rameshwar Prasad Sahu Date: Tue, 7 Jul 2015 15:34:25 +0530 Subject: dmaengine: xgene-dma: Fix the resource map to handle overlapping There is an overlap in dma ring cmd csr region due to sharing of ethernet ring cmd csr region. This patch fix the resource overlapping by mapping the entire dma ring cmd csr region. 
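For illustration, a rough sketch of the approach taken by the resource-map fix that follows: ioremap the whole shared ring command window once, then address this engine's own registers at a fixed offset inside it. The variable names are placeholders and the error handling is abbreviated; only the 0x8000 offset comes from the patch below.

    struct resource *res;
    void __iomem *ring_cmd_base;

    /* map the entire ring command window; it is shared with the
     * ethernet ring cmd csr region, so map it as a whole rather
     * than a private sub-range */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
    if (!res)
            return -EINVAL;
    ring_cmd_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!ring_cmd_base)
            return -ENOMEM;

    /* the DMA engine's command registers sit at a fixed offset
     * (0x8000 in the patch below) inside that shared window */
    csr_ring_cmd = ring_cmd_base + 0x8000;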
Signed-off-by: Rameshwar Prasad Sahu Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt index d305876..c53e0b0 100644 --- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt +++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt @@ -35,7 +35,7 @@ Example: device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0689c3f..58093ed 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -823,7 +823,7 @@ device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec..dff22ab 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -111,6 +111,7 @@ #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_BLK_MEM_RDY 0xD074 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF +#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 /* X-Gene SoC EFUSE csr register and bit defination */ #define XGENE_SOC_JTAG1_SHADOW 0x18 @@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, return -ENOMEM; } + pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; + /* Get efuse csr region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (!res) { -- cgit v0.10.2 From 0ec9ebc706fbd394bc233d87ac7aaad1c4f3ab54 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Wed, 8 Jul 2015 16:28:14 +0200 Subject: dmaengine: mv_xor: fix big endian operation in register mode Commit 6f166312c6ea2 ("dmaengine: mv_xor: add support for a38x command in descriptor mode") introduced the support for a feature that appeared in Armada 38x: specifying the operation to be performed in a per-descriptor basis rather than globally per channel. However, when doing so, it changed the function mv_chan_set_mode() to use: if (IS_ENABLED(__BIG_ENDIAN)) instead of: #if defined(__BIG_ENDIAN) While IS_ENABLED() is perfectly fine for CONFIG_* symbols, it is not for other symbols such as __BIG_ENDIAN that is provided directly by the compiler. Consequently, the commit broke support for big-endian, as the XOR_DESCRIPTOR_SWAP flag was not set in the XOR channel configuration register. The primarily visible effect was some nasty warnings and failures appearing during the self-test of the XOR unit: [ 1.197368] mv_xor d0060900.xor: error on chan 0. 
intr cause 0x00000082 [ 1.197393] mv_xor d0060900.xor: config 0x00008440 [ 1.197410] mv_xor d0060900.xor: activation 0x00000000 [ 1.197427] mv_xor d0060900.xor: intr cause 0x00000082 [ 1.197443] mv_xor d0060900.xor: intr mask 0x000003f7 [ 1.197460] mv_xor d0060900.xor: error cause 0x00000000 [ 1.197477] mv_xor d0060900.xor: error addr 0x00000000 [ 1.197491] ------------[ cut here ]------------ [ 1.197513] WARNING: CPU: 0 PID: 1 at ../drivers/dma/mv_xor.c:664 mv_xor_interrupt_handler+0x14c/0x170() See also: http://storage.kernelci.org/next/next-20150617/arm-mvebu_v7_defconfig+CONFIG_CPU_BIG_ENDIAN=y/lab-khilman/boot-armada-xp-openblocks-ax3-4.txt Signed-off-by: Thomas Petazzoni Fixes: 6f166312c6ea2 ("dmaengine: mv_xor: add support for a38x command in descriptor mode") Reviewed-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ea..f1325f6 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; - if (IS_ENABLED(__BIG_ENDIAN)) - config |= XOR_DESCRIPTOR_SWAP; - else - config &= ~XOR_DESCRIPTOR_SWAP; +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; -- cgit v0.10.2 From 8c8fe97b2b8a216523e2faf1ccca66ddab634e3e Mon Sep 17 00:00:00 2001 From: Jun Nie Date: Fri, 10 Jul 2015 20:02:49 +0800 Subject: Revert "dmaengine: virt-dma: don't always free descriptor upon completion" This reverts commit b9855f03d560d351e95301b9de0bc3cad3b31fe9. The patch break existing DMA usage case. For example, audio SOC dmaengine never release channel and cause virt-dma to cache too much memory in descriptor to exhaust system memory. 
Signed-off-by: Vinod Koul diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d..6f80432 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_move_tail(&vd->node, &vc->desc_submitted); + list_add_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - if (async_tx_test_ack(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + + vc->desc_free(vd); if (cb) cb(cb_data); @@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - if (async_tx_test_ack(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75d..181b952 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,7 +29,6 @@ struct virt_dma_chan { spinlock_t lock; /* protected by vc.lock */ - struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); - unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - return &vd->tx; } @@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all allocated, submitted and issued - * descriptors + * vchan_get_all_descriptors - obtain all submitted and issued descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { - list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { - struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); 
vchan_get_all_descriptors(vc, &head); - list_for_each_entry(vd, &head, node) - async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); -- cgit v0.10.2 From c1bfa985ded82cacdfc6403e78f329c44e35534a Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Fri, 29 May 2015 12:04:13 -0400 Subject: ARM: dts: keystone: fix dt bindings to use post div register for mainpll All of the keystone devices have a separate register to hold post divider value for main pll clock. Currently the fixed-postdiv value used for k2hk/l/e SoCs works by sheer luck as u-boot happens to use a value of 2 for this. Now that we have fixed this in the pll clock driver change the dt bindings for the same. Signed-off-by: Murali Karicheri Acked-by: Santosh Shilimkar Signed-off-by: Olof Johansson diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 4773d6a..d56d68f 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -13,9 +13,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi index d5adee3..af9b719 100644 --- a/arch/arm/boot/dts/k2hk-clocks.dtsi +++ b/arch/arm/boot/dts/k2hk-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi index eb1e3e2..ef8464b 100644 --- a/arch/arm/boot/dts/k2l-clocks.dtsi +++ b/arch/arm/boot/dts/k2l-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { -- cgit v0.10.2 From 8fcd461db7c09337b6d2e22d25eb411123f379e3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 30 Jul 2015 06:57:46 -0400 Subject: nfsd: do nfs4_check_fh in nfs4_check_file instead of nfs4_check_olstateid Currently, preprocess_stateid_op calls nfs4_check_olstateid which verifies that the open stateid corresponds to the current filehandle in the call by calling nfs4_check_fh. If the stateid is a NFS4_DELEG_STID however, then no such check is done. This could cause incorrect enforcement of permissions, because the nfsd_permission() call in nfs4_check_file uses the current filehandle, but any subsequent IO operation will use the file descriptor in the stateid. Move the call to nfs4_check_fh into nfs4_check_file instead so that it can be done for all stateid types. Signed-off-by: Jeff Layton Cc: stable@vger.kernel.org [bfields: moved fh check to avoid NULL deref in special stateid case] Signed-off-by: J.
Bruce Fields diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 61dfb33..9520271 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry) queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } -static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) +static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) { - if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) + if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) return nfserr_bad_stateid; return nfs_ok; } @@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) { __be32 status; - status = nfs4_check_fh(fhp, ols); - if (status) - return status; status = nfsd4_check_openowner_confirmed(ols); if (status) return status; @@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, status = nfserr_bad_stateid; break; } + if (status) + goto out; + status = nfs4_check_fh(fhp, s); done: if (!status && filpp) @@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status) return status; - return nfs4_check_fh(current_fh, stp); + return nfs4_check_fh(current_fh, &stp->st_stid); } /* -- cgit v0.10.2 From 40c3ef9d2f14cce91dbd6ae9c9ccf6210d8c5df7 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Mon, 27 Jul 2015 10:27:21 -0700 Subject: staging: comedi: das1800: add missing break in switch Commit 06ad6bd8 "staging: comedi: das1800: cleanup das1800_probe()" Accidently removed the 'break' statement for case 0x8 of the switch. Add it back. Reported-by: coverity (CID 1309550) Signed-off-by: H Hartley Sweeten Reviewed-by: Ian Abbott Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c index bfa4262..94078118 100644 --- a/drivers/staging/comedi/drivers/das1800.c +++ b/drivers/staging/comedi/drivers/das1800.c @@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev) if (index == das1801hc || index == das1802hc) return board; index = das1801hc; + break; default: dev_err(dev->class_dev, "Board model: probe returned 0x%x (unknown, please report)\n", -- cgit v0.10.2 From e3311469734093724b10c2a81c1193197db03b78 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Mon, 27 Jul 2015 17:30:48 +0300 Subject: i2c: fix leaked device refcount on of_find_i2c_* error path If of_find_i2c_device_by_node() or of_find_i2c_adapter_by_node() find a device by node, but its type does not match, a reference to that device is still held. This change fixes the problem. 
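For illustration, the reference-counting rule the two fixed lookup helpers now follow, reduced to a minimal sketch (it mirrors the change below; the surrounding control flow is simplified):

    struct device *dev;
    struct i2c_client *client;

    dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
    if (!dev)
            return NULL;                 /* nothing found, nothing held */

    client = i2c_verify_client(dev);     /* NULL if dev is not an i2c client */
    if (!client)
            put_device(dev);             /* drop the reference bus_find_device() took */

    return client;                       /* on success the caller owns the reference */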
Signed-off-by: Vladimir Zapolskiy Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 8b64cf3..c83e4d1 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1340,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data) struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { struct device *dev; + struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_client(dev); + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } EXPORT_SYMBOL(of_find_i2c_device_by_node); @@ -1354,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node); struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) { struct device *dev; + struct i2c_adapter *adapter; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_adapter(dev); + adapter = i2c_verify_adapter(dev); + if (!adapter) + put_device(dev); + + return adapter; } EXPORT_SYMBOL(of_find_i2c_adapter_by_node); #else -- cgit v0.10.2 From 7167bf8b7f3f19eeea4602ccc39f26a26a636d8e Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 31 Jul 2015 10:01:40 +0200 Subject: phy-sun4i-usb: Add missing EXPORT_SYMBOL_GPL for sun4i_usb_phy_set_squelch_detect sun4i_usb_phy_set_squelch_detect is used by other code, which may be built as a module, so it should be exported. Signed-off-by: Hans de Goede Signed-off-by: Kishon Vijay Abraham I diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index e17c539..2dad7e8 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled) sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); } +EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect); static struct phy_ops sun4i_usb_phy_ops = { .init = sun4i_usb_phy_init, -- cgit v0.10.2 From c934b3612747bde6c81cf10c2bbde956c6690aec Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Fri, 17 Jul 2015 16:47:22 +0300 Subject: phy: ti-pipe3: i783 workaround for SATA lockup after dpll unlock/relock SATA_PLL_SOFT_RESET bit of CTRL_CORE_SMA_SW_0 must be toggled between a SATA DPLL unlock and re-lock to prevent SATA lockup. Introduce a new DT parameter 'syscon-pllreset' to provide the syscon regmap access to this register which sits in the control module. If the register is not provided we fallback to the old behaviour i.e. SATA DPLL refclk will not be disabled and we prevent SoC low power states. Signed-off-by: Roger Quadros Signed-off-by: Kishon Vijay Abraham I diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt index 305e3df..9cf9446 100644 --- a/Documentation/devicetree/bindings/phy/ti-phy.txt +++ b/Documentation/devicetree/bindings/phy/ti-phy.txt @@ -82,6 +82,9 @@ Optional properties: - id: If there are multiple instance of the same type, in order to differentiate between each instance "id" can be used (e.g., multi-lane PCIe PHY). If "id" is not provided, it is set to default value of '1'. 
+ - syscon-pllreset: Handle to system control region that contains the + CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0 + register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy. This is usually a subnode of ocp2scp to which it is connected. @@ -100,3 +103,16 @@ usb3phy@4a084400 { "sysclk", "refclk"; }; + +sata_phy: phy@4A096000 { + compatible = "ti,phy-pipe3-sata"; + reg = <0x4A096000 0x80>, /* phy_rx */ + <0x4A096400 0x64>, /* phy_tx */ + <0x4A096800 0x40>; /* pll_ctrl */ + reg-names = "phy_rx", "phy_tx", "pll_ctrl"; + ctrl-module = <&omap_control_sata>; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; + #phy-cells = <0>; +}; diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 3510b81..08020dc 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -52,6 +54,8 @@ #define PLL_LOCK 0x2 #define PLL_IDLE 0x1 +#define SATA_PLL_SOFT_RESET BIT(18) + /* * This is an Empirical value that works, need to confirm the actual * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status @@ -82,6 +86,9 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; + struct regmap *dpll_reset_syscon; /* ctrl. reg. acces */ + unsigned int dpll_reset_reg; /* reg. index within syscon */ + bool sata_refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) + /* If dpll_reset_syscon is not present we wont power down SATA DPLL + * due to Errata i783 + */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") && + !phy->dpll_reset_syscon) return 0; /* PCIe doesn't have internal DPLL */ @@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x) } } + /* i783: SATA needs control bit toggle after PLL unlock */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) { + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET); + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, 0); + } + ti_pipe3_disable_clocks(phy); return 0; @@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev) } } else { phy->wkupclk = ERR_PTR(-ENODEV); + phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-pllreset"); + if (IS_ERR(phy->dpll_reset_syscon)) { + dev_info(&pdev->dev, + "can't get syscon-pllreset, sata dpll won't idle\n"); + phy->dpll_reset_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-pllreset", 1, + &phy->dpll_reset_reg)) { + dev_err(&pdev->dev, + "couldn't get pllreset reg. 
offset\n"); + return -EINVAL; + } + } } if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { @@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); - /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ - if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) - if (!IS_ERR(phy->refclk)) + + /* + * Prevent auto-disable of refclk for SATA PHY due to Errata i783 + */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) { + if (!IS_ERR(phy->refclk)) { clk_prepare_enable(phy->refclk); + phy->sata_refclk_enabled = true; + } + } generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - if (!IS_ERR(phy->refclk)) + if (!IS_ERR(phy->refclk)) { clk_disable_unprepare(phy->refclk); + /* + * SATA refclk needs an additional disable as we left it + * on in probe to avoid Errata i783 + */ + if (phy->sata_refclk_enabled) { + clk_disable_unprepare(phy->refclk); + phy->sata_refclk_enabled = false; + } + } + if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); } -- cgit v0.10.2 From 97242f99a013950af63effa0732f8ef7db4e31ec Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 1 Aug 2015 19:59:28 -0400 Subject: link_path_walk(): be careful when failing with ENOTDIR In RCU mode we might end up with dentry evicted just we check that it's a directory. In such case we should return ECHILD rather than ENOTDIR, so that pathwalk would be retries in non-RCU mode. Breakage had been introduced in commit b18825a - prior to that we were looking at nd->inode, which had been fetched before verifying that ->d_seq was still valid. That form of check would only be satisfied if at some point the pathname prefix would indeed have resolved to a non-directory. The fix consists of checking ->d_seq after we'd run into a non-directory dentry, and failing with ECHILD in case of mismatch. Note that all branches since 3.12 have that problem... Signed-off-by: Al Viro diff --git a/fs/namei.c b/fs/namei.c index ae4e4c1..fbbcf09 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1954,8 +1954,13 @@ OK: continue; } } - if (unlikely(!d_can_lookup(nd->path.dentry))) + if (unlikely(!d_can_lookup(nd->path.dentry))) { + if (nd->flags & LOOKUP_RCU) { + if (unlazy_walk(nd, NULL, 0)) + return -ECHILD; + } return -ENOTDIR; + } } } -- cgit v0.10.2 From 27667f4744fc5a0f3e50910e78740bac5670d18b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 29 Jul 2015 22:18:16 -0700 Subject: i915: temporary fix for DP MST docking station NULL pointer dereference Ted Ts'o reports that his Lenovo T540p ThinkPad crashes at boot if attached to the docking station. This is a regression that he was able to bisect to commit 8c7b5ccb7298: "drm/i915: Use atomic helpers for computing changed flags:" The reason seems to be the new call to drm_atomic_helper_check_modeset() added to intel_modeset_compute_config(), which in turn calls update_connector_routing(), and somehow ends up picking a NULL crtc for the connector state, causing the subsequent drm_crtc_index() to OOPS. Daniel Vetter says that the fundamental issue seems to be confusion in the encoder selection, and this isn't the right fix, but while he chases down the proper fix, this at least avoids the NULL pointer dereference and makes Ted's docking station work again. 
Reported-bisected-and-tested-by: Theodore Ts'o Cc: Daniel Vetter Cc: Jani Nikula Cc: Dave Airlie Signed-off-by: Linus Torvalds diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5b59d5ad..aac2122 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -230,10 +230,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } connector_state->best_encoder = new_encoder; - idx = drm_crtc_index(connector_state->crtc); + if (connector_state->crtc) { + idx = drm_crtc_index(connector_state->crtc); - crtc_state = state->crtc_states[idx]; - crtc_state->mode_changed = true; + crtc_state = state->crtc_states[idx]; + crtc_state->mode_changed = true; + } DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", connector->base.id, -- cgit v0.10.2 From 1ebd47efa4e17391dfac8caa349c6a8d35f996d1 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 2 Aug 2015 19:29:16 +0200 Subject: rocker: free netdevice during netdevice removal When removing a port's netdevice in 'rocker_remove_ports', we should also free the allocated 'net_device' structure. Do that by calling 'free_netdev' after unregistering it. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Fixes: 4b8ac9660af ("rocker: introduce rocker switch driver") Acked-by: Scott Feldman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2d8578cade..2e7f9a2 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker) rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, ROCKER_OP_FLAG_REMOVE); unregister_netdev(rocker_port->dev); + free_netdev(rocker_port->dev); } kfree(rocker->ports); } -- cgit v0.10.2 From 3d0e0af40672a0bf16ca0f0591165535138c1f30 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 Jul 2015 17:53:39 -0700 Subject: fq_codel: explicitly reset flows in ->reset() Alex reported the following crash when using fq_codel with htb: crash> bt PID: 630839 TASK: ffff8823c990d280 CPU: 14 COMMAND: "tc" [... snip ...] #8 [ffff8820ceec17a0] page_fault at ffffffff8160a8c2 [exception RIP: htb_qlen_notify+24] RIP: ffffffffa0841718 RSP: ffff8820ceec1858 RFLAGS: 00010282 RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff88241747b400 RDX: ffff88241747b408 RSI: 0000000000000000 RDI: ffff8811fb27d000 RBP: ffff8820ceec1868 R8: ffff88120cdeff24 R9: ffff88120cdeff30 R10: 0000000000000bd4 R11: ffffffffa0840919 R12: ffffffffa0843340 R13: 0000000000000000 R14: 0000000000000001 R15: ffff8808dae5c2e8 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 #9 [...] qdisc_tree_decrease_qlen at ffffffff81565375 #10 [...] fq_codel_dequeue at ffffffffa084e0a0 [sch_fq_codel] #11 [...] fq_codel_reset at ffffffffa084e2f8 [sch_fq_codel] #12 [...] qdisc_destroy at ffffffff81560d2d #13 [...] htb_destroy_class at ffffffffa08408f8 [sch_htb] #14 [...] htb_put at ffffffffa084095c [sch_htb] #15 [...] tc_ctl_tclass at ffffffff815645a3 #16 [...] rtnetlink_rcv_msg at ffffffff81552cb0 [... snip ...] As Jamal pointed out, there is actually no need to call dequeue to purge the queued skb's in reset, data structures can be just reset explicitly. Therefore, we reset everything except config's and stats, so that we would have a fresh start after device flipping.
Fixes: 4b549a2ef4be ("fq_codel: Fair Queue Codel AQM") Reported-by: Alex Gartrell Cc: Alex Gartrell Cc: Jamal Hadi Salim Signed-off-by: Eric Dumazet [xiyou.wangcong@gmail.com: added codel_vars_init() and qdisc_qstats_backlog_dec()] Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 21ca33c..a9ba030 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -288,10 +288,26 @@ begin: static void fq_codel_reset(struct Qdisc *sch) { - struct sk_buff *skb; + struct fq_codel_sched_data *q = qdisc_priv(sch); + int i; - while ((skb = fq_codel_dequeue(sch)) != NULL) - kfree_skb(skb); + INIT_LIST_HEAD(&q->new_flows); + INIT_LIST_HEAD(&q->old_flows); + for (i = 0; i < q->flows_cnt; i++) { + struct fq_codel_flow *flow = q->flows + i; + + while (flow->head) { + struct sk_buff *skb = dequeue_head(flow); + + qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); + } + + INIT_LIST_HEAD(&flow->flowchain); + codel_vars_init(&flow->cvars); + } + memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); + sch->q.qlen = 0; } static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { -- cgit v0.10.2 From 74d33293e467df61de1b1d8b2fbe29e550dec33b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 2 Aug 2015 18:34:55 -0700 Subject: Linux 4.2-rc5 diff --git a/Makefile b/Makefile index afabc44..e79448d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* -- cgit v0.10.2 From 423f04d63cf421ea436bcc5be02543d549ce4b28 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 27 Jul 2015 11:48:52 +1000 Subject: md/raid1: extend spinlock to protect raid1_end_read_request against inconsistencies raid1_end_read_request() assumes that the In_sync bits are consistent with the ->degraded count. raid1_spare_active updates the In_sync bit before the ->degraded count and so exposes an inconsistency, as does error() So extend the spinlock in raid1_spare_active() and error() to hide those inconsistencies. This should probably be part of Commit: 34cab6f42003 ("md/raid1: fix test for 'was read error from last working device'.") as it addresses the same issue. It fixes the same bug and should go to -stable for same reasons. Fixes: 76073054c95b ("md/raid1: clean up read_balance.") Cc: stable@vger.kernel.org (v3.0+) Signed-off-by: NeilBrown diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 94f5b55..967a4ed 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; + unsigned long flags; /* * If it is not operational, then we have already marked it as dead @@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) return; } set_bit(Blocked, &rdev->flags); + spin_lock_irqsave(&conf->device_lock, flags); if (test_and_clear_bit(In_sync, &rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded++; set_bit(Faulty, &rdev->flags); - spin_unlock_irqrestore(&conf->device_lock, flags); } else set_bit(Faulty, &rdev->flags); + spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. */ @@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev) * Find all failed disks within the RAID1 configuration * and mark them readable.
* Called under mddev lock, so rcu protection not needed. + * device_lock used to avoid races with raid1_end_read_request + * which expects 'In_sync' flags and ->degraded to be consistent. */ + spin_lock_irqsave(&conf->device_lock, flags); for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = conf->mirrors[i].rdev; struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; @@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev) sysfs_notify_dirent_safe(rdev->sysfs_state); } } - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded -= count; spin_unlock_irqrestore(&conf->device_lock, flags); -- cgit v0.10.2 From 528464eaa46ae1bd319882e4dd3495802e55b8c4 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 23 Jul 2015 14:32:32 +0530 Subject: thermal: remove dangling 'weight_attr' device file This file isn't getting removed while we unbind a device from thermal zone. And this causes following messages when the device is registered again: WARNING: CPU: 0 PID: 2228 at /home/viresh/linux/fs/sysfs/dir.c:31 sysfs_warn_dup+0x60/0x70() sysfs: cannot create duplicate filename '/devices/virtual/thermal/thermal_zone0/cdev0_weight' Modules linked in: cpufreq_dt(+) [last unloaded: cpufreq_dt] CPU: 0 PID: 2228 Comm: insmod Not tainted 4.2.0-rc3-00059-g44fffd9473eb #272 Hardware name: SAMSUNG EXYNOS (Flattened Device Tree) [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0x84/0xc4) [] (dump_stack) from [] (warn_slowpath_common+0x80/0xb0) [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40) [] (warn_slowpath_fmt) from [] (sysfs_warn_dup+0x60/0x70) [] (sysfs_warn_dup) from [] (sysfs_add_file_mode_ns+0x13c/0x190) [] (sysfs_add_file_mode_ns) from [] (sysfs_create_file_ns+0x3c/0x48) [] (sysfs_create_file_ns) from [] (thermal_zone_bind_cooling_device+0x260/0x358) [] (thermal_zone_bind_cooling_device) from [] (of_thermal_bind+0x88/0xb4) [] (of_thermal_bind) from [] (__thermal_cooling_device_register+0x17c/0x2e0) [] (__thermal_cooling_device_register) from [] (__cpufreq_cooling_register+0x3a0/0x51c) [] (__cpufreq_cooling_register) from [] (cpufreq_ready+0x44/0x88 [cpufreq_dt]) [] (cpufreq_ready [cpufreq_dt]) from [] (cpufreq_add_dev+0x4a0/0x7dc) [] (cpufreq_add_dev) from [] (subsys_interface_register+0x94/0xd8) [] (subsys_interface_register) from [] (cpufreq_register_driver+0x10c/0x1f0) [] (cpufreq_register_driver) from [] (dt_cpufreq_probe+0x60/0x8c [cpufreq_dt]) [] (dt_cpufreq_probe [cpufreq_dt]) from [] (platform_drv_probe+0x44/0xa4) [] (platform_drv_probe) from [] (driver_probe_device+0x174/0x2b4) [] (driver_probe_device) from [] (__driver_attach+0x8c/0x90) [] (__driver_attach) from [] (bus_for_each_dev+0x68/0x9c) [] (bus_for_each_dev) from [] (bus_add_driver+0x19c/0x214) [] (bus_add_driver) from [] (driver_register+0x78/0xf8) [] (driver_register) from [] (do_one_initcall+0x8c/0x1d4) [] (do_one_initcall) from [] (do_init_module+0x5c/0x1b8) [] (do_init_module) from [] (load_module+0xd34/0xed8) [] (load_module) from [] (SyS_init_module+0xd0/0x120) [] (SyS_init_module) from [] (ret_fast_syscall+0x0/0x3c) ---[ end trace 3be0e7b7dc6e3c4f ]--- Fixes: db91651311c8 ("thermal: export weight to sysfs") Acked-by: Javi Merino Signed-off-by: Viresh Kumar Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 04659bf..4ca211b 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct 
thermal_zone_device *tz, return -ENODEV; unbind: + device_remove_file(&tz->device, &pos->weight_attr); device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); release_idr(&tz->idr, &tz->lock, pos->id); -- cgit v0.10.2 From d5f831090191fa26a539e12456af64547ab11dde Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Fri, 3 Jul 2015 10:24:33 +0100 Subject: thermal: power_allocator: trace the real requested power The power allocator governor uses ftrace to output a bunch of internal data for debugging and tuning. Currently, the requested power it outputs is the "weighted" requested power, that is, what each cooling device has requested multiplied by the cooling device weight. It is more useful to trace the real request, without any weight being applied. This commit only affects the data traced, there is no functional change. Cc: Zhang Rui Cc: Eduardo Valentin Signed-off-by: Javi Merino Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 4672250..63a448f 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz, struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; u32 *req_power, *max_power, *granted_power, *extra_actor_power; - u32 total_req_power, max_allocatable_power; + u32 *weighted_req_power; + u32 total_req_power, max_allocatable_power, total_weighted_req_power; u32 total_granted_power, power_range; int i, num_actors, total_weight, ret = 0; int trip_max_desired_temperature = params->trip_max_desired_temperature; @@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz, } /* - * We need to allocate three arrays of the same size: - * req_power, max_power and granted_power. They are going to - * be needed until this function returns. Allocate them all - * in one go to simplify the allocation and deallocation - * logic. + * We need to allocate five arrays of the same size: + * req_power, max_power, granted_power, extra_actor_power and + * weighted_req_power. They are going to be needed until this + * function returns. Allocate them all in one go to simplify + * the allocation and deallocation logic. 
*/ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); - req_power = devm_kcalloc(&tz->device, num_actors * 4, + BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); + req_power = devm_kcalloc(&tz->device, num_actors * 5, sizeof(*req_power), GFP_KERNEL); if (!req_power) { ret = -ENOMEM; @@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz, max_power = &req_power[num_actors]; granted_power = &req_power[2 * num_actors]; extra_actor_power = &req_power[3 * num_actors]; + weighted_req_power = &req_power[4 * num_actors]; i = 0; + total_weighted_req_power = 0; total_req_power = 0; max_allocatable_power = 0; @@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz, else weight = instance->weight; - req_power[i] = frac_to_int(weight * req_power[i]); + weighted_req_power[i] = frac_to_int(weight * req_power[i]); if (power_actor_get_max_power(cdev, tz, &max_power[i])) continue; total_req_power += req_power[i]; max_allocatable_power += max_power[i]; + total_weighted_req_power += weighted_req_power[i]; i++; } @@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz, power_range = pid_controller(tz, current_temp, control_temp, max_allocatable_power); - divvy_up_power(req_power, max_power, num_actors, total_req_power, - power_range, granted_power, extra_actor_power); + divvy_up_power(weighted_req_power, max_power, num_actors, + total_weighted_req_power, power_range, granted_power, + extra_actor_power); total_granted_power = 0; i = 0; -- cgit v0.10.2 From 5f09a5cbd14ae16e93866040fa44d930ff885650 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 8 Jun 2015 10:35:49 +0900 Subject: thermal: exynos: Disable the regulator on probe failure During probe the regulator (if present) was enabled but not disabled in case of failure. So an unsuccessful probe lead to enabling the regulator which was actually not needed because the device was not enabled. Additionally each deferred probe lead to increase of regulator enable count so it would not be effectively disabled during removal of the device. Test HW: Exynos4412 - Trats2 board Signed-off-by: Krzysztof Kozlowski Fixes: 498d22f616f6 ("thermal: exynos: Support for TMU regulator defined at device tree") Cc: Reviewed-by: Javier Martinez Canillas Signed-off-by: Lukasz Majewski Tested-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 531f4b17..13c3ace 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1392,6 +1392,8 @@ err_clk_sec: if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); err_sensor: + if (!IS_ERR_OR_NULL(data->regulator)) + regulator_disable(data->regulator); thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); return ret; -- cgit v0.10.2 From f87e6bd3f7401ee3c04248d8ea2dc32883a1f8fb Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 2 Jul 2015 15:40:00 +0900 Subject: thermal: exynos: Add the dependency of CONFIG_THERMAL_OF instead of CONFIG_OF The exynos thermal driver use the of_thermal_*() API to parse the basic data for thermal management from devicetree file. So, if CONFIG_EXYNOS_THERMAL is selected without CONFIG_THERMAL_OF, kernel can build it without any problem. But, exynos thermal driver is not working with following error log. 
This patch add the dependency of CONFIG_THERMAL_OF instead of CONFIG_OF. [ 1.458644] get_th_reg: Cannot get trip points from of-thermal.c! [ 1.459096] get_th_reg: Cannot get trip points from of-thermal.c! [ 1.465211] exynos4412_tmu_initialize: No CRITICAL trip point defined at of-thermal.c! Cc: Zhang Rui Cc: Eduardo Valentin Cc: Lukasz Majewski Signed-off-by: Chanwoo Choi Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig index c8e35c1..e0da386 100644 --- a/drivers/thermal/samsung/Kconfig +++ b/drivers/thermal/samsung/Kconfig @@ -1,6 +1,6 @@ config EXYNOS_THERMAL tristate "Exynos thermal management unit driver" - depends on OF + depends on THERMAL_OF help If you say yes here you get support for the TMU (Thermal Management Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises -- cgit v0.10.2 From 3c19d237dd8148926e49259e495ee41dddd1f09c Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 2 Jul 2015 15:40:01 +0900 Subject: thermal: exynos: Remove unused code related to platform_data on probe() This patch removes the unused code related to struct exynos_tmu_platform_data because exynos_tmu_probe() don't handle the struct exynos_tmu_platform_data *pdata. Test HW: Exynos4412 - Trats2 board Cc: Zhang Rui Cc: Eduardo Valentin Cc: Lukasz Majewski Signed-off-by: Chanwoo Choi Signed-off-by: Lukasz Majewski Tested-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 13c3ace..c96ff10 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = { static int exynos_tmu_probe(struct platform_device *pdev) { - struct exynos_tmu_platform_data *pdata; struct exynos_tmu_data *data; int ret; @@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) if (ret) goto err_sensor; - pdata = data->pdata; - INIT_WORK(&data->irq_work, exynos_tmu_work); data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); -- cgit v0.10.2 From b6878d9e03043695dbf3fa1caa6dfc09db225b16 Mon Sep 17 00:00:00 2001 From: Benjamin Randazzo Date: Sat, 25 Jul 2015 16:36:50 +0200 Subject: md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 
5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo Signed-off-by: NeilBrown diff --git a/drivers/md/md.c b/drivers/md/md.c index 0c2a4e8..e25f00f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg) char *ptr; int err; - file = kmalloc(sizeof(*file), GFP_NOIO); + file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; -- cgit v0.10.2 From 49895bcc7e566ba455eb2996607d6fbd3447ce16 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 3 Aug 2015 17:09:57 +1000 Subject: md/raid5: don't let shrink_slab shrink too far. I have a report of drop_one_stripe() called from raid5_cache_scan() apparently finding ->max_nr_stripes == 0. This should not be allowed. So add a test to keep max_nr_stripes above min_nr_stripes. Also use a 'mask' rather than a 'mod' in drop_one_stripe to ensure 'hash' is valid even if max_nr_stripes does reach zero. Fixes: edbe83ab4c27 ("md/raid5: allow the stripe_cache to grow and shrink.") Cc: stable@vger.kernel.org (4.1 - please release with 2d5b569b665) Reported-by: Tomas Papan Signed-off-by: NeilBrown diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 643d217..f757023 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) static int drop_one_stripe(struct r5conf *conf) { struct stripe_head *sh; - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; spin_lock_irq(conf->hash_locks + hash); sh = get_free_stripe(conf, hash); @@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, if (mutex_trylock(&conf->cache_size_mutex)) { ret= 0; - while (ret < sc->nr_to_scan) { + while (ret < sc->nr_to_scan && + conf->max_nr_stripes > conf->min_nr_stripes) { if (drop_one_stripe(conf) == 0) { ret = SHRINK_STOP; break; -- cgit v0.10.2 From 9b2b6f7f357d9a2473017295bb26ff907e149d34 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sun, 19 Jul 2015 23:27:50 +0200 Subject: CPUFREQ: Loongson2: Fix broken build due to incorrect include. 71eeedcf51544831ae356a773814401143ed32d4 (MIPS: Lemote 2F: Fix build caused by recent mass rename.) only fixed one instance of this issue in arch/mips but missed a 2nd one in drivers/cpufreq/loongson2_cpufreq.c. [ralf@linux-mips.org: dropped the one segment for the already fixed instance and changed the other avoiding an include without a / because that's generally is a bad idea.] Signed-off-by: Ralf Baechle Acked-by: Viresh Kumar Patchwork: https://patchwork.linux-mips.org/patch/10659/ diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e362860..cd593c1 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -20,7 +20,7 @@ #include #include -#include +#include static uint nowait; -- cgit v0.10.2 From d3557d96163d7d8250549a438232c5dd62d96959 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 19 Jul 2015 14:42:49 +0200 Subject: MIPS: Fix build with CONFIG_OF=y for non OF-enabled targets Commit 01306aeadd75 ("MIPS: prepare for user enabling of CONFIG_OF") changed the guards in asm/prom.h from CONFIG_OF to CONFIG_USE_OF, but missed the actual function declarations in kernel/prom.c, which have additional dependencies. 
Fixes the following build error: CC arch/mips/kernel/prom.o arch/mips/kernel/prom.c: In function '__dt_setup_arch': arch/mips/kernel/prom.c:54:2: error: implicit declaration of function 'early_init_dt_scan' [-Werror=implicit-function-declaration] if (!early_init_dt_scan(bph)) ^ Fixes: 01306aeadd75 ("MIPS: prepare for user enabling of CONFIG_OF") Signed-off-by: Jonas Gorski Acked-by: Rob Herring Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Cc: Grant Likely Patchwork: https://patchwork.linux-mips.org/patch/10741/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index b130033..5fcec30 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -38,7 +38,7 @@ char *mips_get_machine_name(void) return mips_machine_name; } -#ifdef CONFIG_OF +#ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); -- cgit v0.10.2 From 1d62d737555e1378eb62a8bba26644f7d97139d2 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 19 Jul 2015 00:38:41 +0200 Subject: MIPS: Fix sched_getaffinity with MT FPAFF enabled p->thread.user_cpus_allowed is zero-initialized and is only filled on the first sched_setaffinity call. To avoid adding overhead in the task initialization codepath, simply OR the returned mask in sched_getaffinity with p->cpus_allowed. Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10740/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 3e4491a..789d7bf 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { unsigned int real_len; - cpumask_t mask; + cpumask_t allowed, mask; int retval; struct task_struct *p; @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: read_unlock(&tasklist_lock); -- cgit v0.10.2 From 531a6d599f4304156236ebdd531aaa80be61868d Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Tue, 23 Jun 2015 12:02:00 +0100 Subject: MIPS: unaligned: Fix build error on big endian R6 kernels Commit eeb538950367 ("MIPS: unaligned: Prevent EVA instructions on kernel unaligned accesses") renamed the Load* and Store* defines in unaligned.c to _Load* and _Store* as part of its fix. One define was missed out which causes big endian R6 kernels to fail to build. 
arch/mips/kernel/unaligned.c:880:35: error: implicit declaration of function '_StoreDW' #define StoreDW(addr, value, res) _StoreDW(addr, value, res) ^ Signed-off-by: James Cowgill Fixes: eeb538950367 ("MIPS: unaligned: Prevent EVA instructions on kernel unaligned accesses") Cc: Markos Chandras Cc: # 4.0+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10575/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index af84bef..eb3efd1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -438,7 +438,7 @@ do { \ : "memory"); \ } while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ -- cgit v0.10.2 From 106eccb4d20f35ebc58ff2286c170d9e79c5ff68 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 17 Jul 2015 15:54:41 +0100 Subject: MIPS: Malta: Don't reinitialise RTC On Malta, since commit a87ea88d8f6c ("MIPS: Malta: initialise the RTC at boot"), the RTC is reinitialised and forced into binary coded decimal (BCD) mode during init, even if the bootloader has already initialised it, and may even have already put it into binary mode (as YAMON does). This corrupts the current time, can result in the RTC seconds being an invalid BCD (e.g. 0x1a..0x1f) for up to 6 seconds, as well as confusing YAMON for a while after reset, enough for it to report timeouts when attempting to load from TFTP (it actually uses the RTC in that code). Therefore only initialise the RTC to the extent that is necessary so that Linux avoids interfering with the bootloader setup, while also allowing it to estimate the CPU frequency without hanging, without a bootloader necessarily having done anything with the RTC (for example when the kernel is loaded via EJTAG). The divider control is configured for a 32KHZ reference clock if necessary, and the SET bit of the RTC_CONTROL register is cleared if necessary without changing any other bits (this bit will be set when coming out of reset if the battery has been disconnected). Fixes: a87ea88d8f6c ("MIPS: Malta: initialise the RTC at boot") Signed-off-by: James Hogan Reviewed-by: Paul Burton Cc: Ralf Baechle Cc: Maciej W. Rozycki Cc: linux-mips@linux-mips.org Cc: # 3.14+ Patchwork: https://patchwork.linux-mips.org/patch/10739/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 5625b19..c1dc730 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -171,14 +171,17 @@ unsigned int get_c0_compare_int(void) static void __init init_rtc(void) { - /* stop the clock whilst setting it up */ - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL); + unsigned char freq, ctrl; - /* 32KHz time base */ - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); + /* Set 32KHz time base if not already set */ + freq = CMOS_READ(RTC_FREQ_SELECT); + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); - /* start the clock */ - CMOS_WRITE(RTC_24H, RTC_CONTROL); + /* Ensure SET bit is clear so RTC can run */ + ctrl = CMOS_READ(RTC_CONTROL); + if (ctrl & RTC_SET) + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); } void __init plat_time_init(void) -- cgit v0.10.2 From e070dab73523bcbf2409ba8b5c342d53bccc0959 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 23 Jul 2015 11:10:38 +0200 Subject: MIPS: Handle page faults of executable but unreadable pages correctly. 
Without this we end taking execeptions in an endless loop hanging the thread. Signed-off-by: Ralf Baechle diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 36c0f26..852a41c 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -133,7 +133,8 @@ good_area: #endif goto bad_area; } - if (!(vma->vm_flags & VM_READ)) { + if (!(vma->vm_flags & VM_READ) && + exception_epc(regs) != address) { #if 0 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", raw_smp_processor_id(), -- cgit v0.10.2 From 55fdcb2d56b6edc027657fde9da0c4f224d32303 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 23 Jul 2015 11:10:59 +0200 Subject: MIPS: Partially disable RIXI support. Execution of break instruction, trap instructions, emulation of unaligned loads or floating point instructions - anything that tries to read the instruction's opcode from userspace - needs read access to a page. RIXI (Read Inhibit / Execute Inhibit) support however allows the creation of pags that are executable but not readable. On such a mapping the attempted load of the opcode by the kernel is going to cause an endless loop of page faults. The quick workaround for this is to disable the combinations that the kernel currently isn't able to handle which are executable mappings. Signed-off-by: Ralf Baechle diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 77d96db..aab218c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -160,18 +160,18 @@ static inline void setup_protection_map(void) protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); } else { -- cgit v0.10.2 From 4ace6139bf23ab4f152ba4207fc10b76cc01d2a5 Mon Sep 17 00:00:00 2001 From: Alex Smith Date: Fri, 24 Jul 2015 16:57:49 +0100 Subject: MIPS: SMP: Don't increment irq_count multiple times for call function IPIs The majority of SMP platforms handle their IPIs through do_IRQ() which calls irq_{enter/exit}(). 
When a call function IPI is received, smp_call_function_interrupt() is called which also calls irq_{enter,exit}(), meaning irq_count is raised twice. When tick broadcasting is used (which is implemented via a call function IPI), this incorrectly causes all CPU idle time on the core receiving broadcast ticks to be accounted as time spent servicing IRQs, as account_process_tick() will account as such if irq_count is greater than 1. This results in 100% CPU usage being reported on a core which receives its ticks via broadcast. This patch removes the SMP smp_call_function_interrupt() wrapper which calls irq_{enter,exit}(). Platforms which handle their IPIs through do_IRQ() now call generic_smp_call_function_interrupt() directly to avoid incrementing irq_count a second time. Platforms which don't (loongson, sgi-ip27, sibyte) call generic_smp_call_function_interrupt() wrapped in irq_{enter,exit}(). Signed-off-by: Alex Smith Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10770/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 56f5d08..b7fa9ae 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 16f1ea9..03722d4 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif -extern asmlinkage void smp_call_function_interrupt(void); - static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 336708a..78cf8c2 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) if (action == 0) scheduler_ipi(); else - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d0744cc..a31896c 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -192,16 +192,6 @@ asmlinkage void start_secondary(void) cpu_startup_entry(CPUHP_ONLINE); } -/* - * Call into both interrupt handlers, as we share the IPI for them - */ -void __irq_entry smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - irq_exit(); -} - static void stop_this_cpu(void *dummy) { /* diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 6ab1057..be18648 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git 
a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509877c..1a4738a 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index d1392f8..fa8f591 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index dc3e327..f5fff22 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) { clear_c0_eimr(irq); ack_c0_eirr(irq); - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); set_c0_eimr(irq); } diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 42181c7..f8d3e08 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index 1017058..ffa0f71 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 3fbaef9..16ec4e1 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else #endif { diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index af7d44e..4c71aea 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -29,8 +29,6 @@ #include #include -extern void smp_call_function_interrupt(void); - /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware @@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + 
irq_exit(); + } } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c0c4b3f..1cf66f5 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b7d54d4..ff4be05 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } -- cgit v0.10.2 From 55c723e181ccec30fb5c672397fe69ec35967d97 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 27 Jul 2015 13:50:21 +0100 Subject: MIPS: do_mcheck: Fix kernel code dump with EVA If a machine check exception is raised in kernel mode, user context, with EVA enabled, then the do_mcheck handler will attempt to read the code around the EPC using EVA load instructions, i.e. as if the reads were from user mode. This will either read random user data if the process has anything mapped at the same address, or it will cause an exception which is handled by __get_user, resulting in this output: Code: (Bad address in epc) Fix by setting the current user access mode to kernel if the saved register context indicates the exception was taken in kernel mode. This causes __get_user to use normal loads to read the kernel code. Signed-off-by: James Hogan Cc: Markos Chandras Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Cc: # 3.15+ Patchwork: https://patchwork.linux-mips.org/patch/10777/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e207a43..02484d1 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1519,6 +1519,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1540,8 +1541,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) -- cgit v0.10.2 From 1e77863a51698c4319587df34171bd823691a66a Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 27 Jul 2015 13:50:22 +0100 Subject: MIPS: show_stack: Fix stack trace with EVA The show_stack() function deals exclusively with kernel contexts, but if it gets called in user context with EVA enabled, show_stacktrace() will attempt to access the stack using EVA accesses, which will either read other user mapped data, or more likely cause an exception which will be handled by __get_user(). This is easily reproduced using SysRq t to show all task states, which results in the following stack dump output: Stack : (Bad stack address) Fix by setting the current user access mode to kernel around the call to show_stacktrace(). This causes __get_user() to use normal loads to read the kernel stack. 
Now we get the correct output, like this: Stack : 00000000 80168960 00000000 004a0000 00000000 00000000 8060016c 1f3abd0c 1f172cd8 8056f09c 7ff1e450 8014fc3c 00000001 806dd0b0 0000001d 00000002 1f17c6a0 1f17c804 1f17c6a0 8066f6e0 00000000 0000000a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0110e800 1f3abd6c 1f17c6a0 ... Signed-off-by: James Hogan Cc: Markos Chandras Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Cc: # 3.15+ Patchwork: https://patchwork.linux-mips.org/patch/10778/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 02484d1..8ea28e6 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task, void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. + */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) -- cgit v0.10.2 From 0cb0985f57783c2f3c6c8ffe7e7665e80c56bd92 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 23 Jul 2015 18:59:52 +0200 Subject: MIPS: Export get_c0_perfcount_int() get_c0_perfcount_int is tested from oprofile code. If oprofile is compiled as module, get_c0_perfcount_int needs to be exported, otherwise it cannot be resolved. Fixes: a669efc4a3b4 ("MIPS: Add hook to get C0 performance counter interrupt") Cc: stable@vger.kernel.org # v3.19+ Signed-off-by: Felix Fietkau Cc: linux-mips@linux-mips.org Cc: abrestic@chromium.org Patchwork: https://patchwork.linux-mips.org/patch/10763/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 01a644f..1ba2120 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -190,6 +190,7 @@ int get_c0_perfcount_int(void) { return ATH79_MISC_IRQ(5); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index be18648..2c218c3 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void) { return ltq_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index c1dc730..b7bf721 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -154,6 +154,7 @@ int get_c0_perfcount_int(void) return mips_cpu_perf_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index e1d6989..a120b7a 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 7c73fcb..8a37734 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -26,6 
+26,7 @@ int get_c0_perfcount_int(void) { return gic_get_c0_perfcount_int(); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); int get_c0_fdc_int(void) { diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 53707aa..8c624a8 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void) { return rt_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { -- cgit v0.10.2 From 3592bb08fb2da21157bc2837c4c9a951b84d0c7a Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Tue, 28 Apr 2015 10:52:50 -0700 Subject: MIPS: BMIPS: Delete unused Kconfig symbol This was left over from an earlier iteration of the BMIPS irqchip changes. It doesn't actually have an effect, so let's nuke it. Reported-by: Valentin Rothberg Signed-off-by: Kevin Cernekee Acked-by: Florian Fainelli Cc: stable@vger.kernel.org # v4.1+ Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/9910/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cee5f93..199a835 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -151,7 +151,6 @@ config BMIPS_GENERIC select BCM7120_L2_IRQ select BRCMSTB_L2_IRQ select IRQ_MIPS_CPU - select RAW_IRQ_ACCESSORS select DMA_NONCOHERENT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN -- cgit v0.10.2 From 8ec7cfce3762299ae289c384e281b2f4010ae231 Mon Sep 17 00:00:00 2001 From: Tomer Barletz Date: Sun, 2 Aug 2015 02:08:57 -0700 Subject: ALSA: oxygen: Fix logical-not-parentheses warning This fixes the following warning, that is seen with gcc 5.1: warning: logical not is only applied to the left hand side of comparison [-Wlogical-not-parentheses]. Signed-off-by: Tomer Barletz Signed-off-by: Takashi Iwai diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c index 6492bca..4ca1266 100644 --- a/sound/pci/oxygen/oxygen_mixer.c +++ b/sound/pci/oxygen/oxygen_mixer.c @@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl, int changed; mutex_lock(&chip->mutex); - changed = !value->value.integer.value[0] != chip->dac_mute; + changed = (!value->value.integer.value[0]) != chip->dac_mute; if (changed) { chip->dac_mute = !value->value.integer.value[0]; chip->model.update_dac_mute(chip); -- cgit v0.10.2 From 247bfb65d731350093f5d1a0a8b3d65e49c17baa Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 28 Jul 2015 19:24:24 -0700 Subject: Revert "MIPS: BCM63xx: Provide a plat_post_dma_flush hook" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 3cf29543413207d3ab1c3f62a88c09bb46f2264e ("MIPS: BCM63xx: Provide a plat_post_dma_flush hook") since this commit was found to prevent BCM6358 (early BMIPS4350 cores) and some BCM6368 (BMIPS4380 cores) from booting reliably. Alvaro was able to track this down to an issue specifically located to devices that use the second thread (TP1) when booting. Since BCM63xx did not have a need for plat_post_dma_flush() hook before, let's just keep things the way they were. 
Reported-by: Álvaro Fernández Rojas Reported-by: Jonas Gorski Signed-off-by: Florian Fainelli Cc: stable@vger.kernel.org Cc: Kevin Cernekee Cc: Nicolas Schichan Cc: linux-mips@linux-mips.org Cc: blogic@openwrt.org Cc: noltari@gmail.com Cc: jogo@openwrt.org Cc: Florian Fainelli Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10804/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h deleted file mode 100644 index 11d3b57..0000000 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H - -#include - -#define plat_post_dma_flush bmips_post_dma_flush - -#include - -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */ -- cgit v0.10.2 From 741e3b9902d11585e18bfc7f8d47e913616bb070 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sun, 2 Aug 2015 13:24:13 -0500 Subject: rtlwifi: rtl8723be: Add module parameter for MSI interrupts The driver code allows for the disabling of MSI interrupts; however the module_parm line was missed and the option fails to show with modinfo. Signed-off-by: Larry Finger Cc: Stable [3.15+] Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 1017f02..7bf88d9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); -- cgit v0.10.2 From 3aff47c062b944a5e1f9af56a37a23f5295628fc Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 31 Jul 2015 16:29:38 +0100 Subject: MIPS: Flush RPS on kernel entry with EVA When EVA is enabled, flush the Return Prediction Stack (RPS) present on some MIPS cores on entry to the kernel from user mode. This is important specifically for interAptiv with EVA enabled, otherwise kernel mode RPS mispredicts may trigger speculative fetches of user return addresses, which may be sensitive in the kernel address space due to EVA's overlapping user/kernel address spaces. Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Markos Chandras Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Cc: # 3.15.x- Patchwork: https://patchwork.linux-mips.org/patch/10812/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 28d6d93..a71da57 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -152,6 +152,31 @@ .set noreorder bltz k0, 8f move k1, sp +#ifdef CONFIG_EVA + /* + * Flush interAptiv's Return Prediction Stack (RPS) by writing + * EntryHi. Toggling Config7.RPS is slower and less portable. + * + * The RPS isn't automatically flushed when exceptions are + * taken, which can result in kernel mode speculative accesses + * to user addresses if the RPS mispredicts. 
That's harmless + * when user and kernel share the same address space, but with + * EVA the same user segments may be unmapped to kernel mode, + * even containing sensitive MMIO regions or invalid memory. + * + * This can happen when the kernel sets the return address to + * ret_from_* and jr's to the exception handler, which looks + * more like a tail call than a function call. If nested calls + * don't evict the last user address in the RPS, it will + * mispredict the return and fetch from a user controlled + * address into the icache. + * + * More recent EVA-capable cores with MAAR to restrict + * speculative accesses aren't affected. + */ + MFC0 k0, CP0_ENTRYHI + MTC0 k0, CP0_ENTRYHI +#endif .set reorder /* Called from user mode, new stack. */ get_saved_sp -- cgit v0.10.2 From a4504755e7dc8d43ed2a934397032691cd03adf7 Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Wed, 17 Jun 2015 17:12:50 +0100 Subject: MIPS: Replace add and sub instructions in relocate_kernel.S with addiu Fixes the assembler errors generated when compiling a MIPS R6 kernel with CONFIG_KEXEC on, by replacing the offending add and sub instructions with addiu instructions. Build errors: arch/mips/kernel/relocate_kernel.S: Assembler messages: arch/mips/kernel/relocate_kernel.S:27: Error: invalid operands `dadd $16,$16,8' arch/mips/kernel/relocate_kernel.S:64: Error: invalid operands `dadd $20,$20,8' arch/mips/kernel/relocate_kernel.S:65: Error: invalid operands `dadd $18,$18,8' arch/mips/kernel/relocate_kernel.S:66: Error: invalid operands `dsub $22,$22,1' scripts/Makefile.build:294: recipe for target 'arch/mips/kernel/relocate_kernel.o' failed Signed-off-by: James Cowgill Cc: # 4.0+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10558/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 74bab9d..c6bbf21 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel) process_entry: PTR_L s2, (s0) - PTR_ADD s0, s0, SZREG + PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not @@ -61,9 +61,9 @@ copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) - PTR_ADD s4, s4, SZREG - PTR_ADD s2, s2, SZREG - LONG_SUB s6, s6, 1 + PTR_ADDIU s4, s4, SZREG + PTR_ADDIU s2, s2, SZREG + LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry -- cgit v0.10.2 From e13c42ecbe580509451e021ba2586871e5b47640 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 3 Aug 2015 15:37:24 +0530 Subject: ARCv2: Fix the peripheral address space detection With HS 2.1 release, the peripheral space register no longer contains the uncached space specifics, causing the kernel to panic early on. So read the newer NON VOLATILE AUX register to get that info. 
Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 070f588..c8f57b8 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -89,11 +89,10 @@ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 - /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_INTR_VEC_BASE 0x25 - +#define AUX_NON_VOL 0x5e /* * Floating Pt Registers @@ -240,9 +239,9 @@ struct bcr_extn_xymem { struct bcr_perip { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int start:8, pad2:8, sz:8, pad:8; + unsigned int start:8, pad2:8, sz:8, ver:8; #else - unsigned int pad:8, sz:8, pad2:8, start:8; + unsigned int ver:8, sz:8, pad2:8, start:8; #endif }; diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 18cc015..f2f771b 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void) struct bcr_perip uncached_space; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + unsigned long perip_space; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); @@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void) cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); - BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); + if (uncached_space.ver < 3) + perip_space = uncached_space.start << 24; + else + perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000; + + BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE); READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); -- cgit v0.10.2 From c93e64e91248becd0edb8f01723dff9da890e2ab Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 16 Jan 2015 11:32:51 -0500 Subject: usb: udc: core: add device_del() call to error pathway This patch fixes a bug in the error pathway of usb_add_gadget_udc_release() in udc-core.c. If the udc registration fails, the gadget registration is not fully undone; there's a put_device(&gadget->dev) call but no device_del(). CC: Acked-by: Peter Chen Signed-off-by: Alan Stern Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 362ee8a..89ed5e7 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -323,6 +323,7 @@ err4: err3: put_device(&udc->dev); + device_del(&gadget->dev); err2: put_device(&gadget->dev); -- cgit v0.10.2 From 6b5e38dccd2c72b62c57681861ba3594117d993e Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 10 Jul 2015 15:35:00 +0900 Subject: thermal: Drop owner assignment from platform_driver platform_driver does not need to set an owner because platform_driver_register() will set it. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Zhang Rui diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index d5dd357..b49f97c 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops, static struct platform_driver hisi_thermal_driver = { .driver = { .name = "hisi_thermal", - .owner = THIS_MODULE, .pm = &hisi_thermal_pm_ops, .of_match_table = of_hisi_thermal_match, }, -- cgit v0.10.2 From 5413fcdbe9e7ca32ce3f00fd5251dfa560b7484b Mon Sep 17 00:00:00 2001 From: Salvatore Mesoraca Date: Mon, 3 Aug 2015 12:40:51 +0200 Subject: Adding YAMA hooks also when YAMA is not stacked. Without this patch YAMA will not work at all if it is chosen as the primary LSM instead of being "stacked". 
Signed-off-by: Salvatore Mesoraca Acked-by: Kees Cook Signed-off-by: James Morris diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 9ed3250..5ebb896 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -406,6 +406,7 @@ static __init int yama_init(void) */ if (!security_module_enable("yama")) return 0; + yama_add_hooks(); #endif pr_info("Yama: becoming mindful.\n"); -- cgit v0.10.2 From a094935e4ebdf5c22c45b8aeeb2d88e9e8c53dbf Mon Sep 17 00:00:00 2001 From: Bard Liao Date: Mon, 3 Aug 2015 12:17:39 +0800 Subject: ASoC: rt5645: Fix lost pin setting for DMIC1 I2S2_DAC pin can be used for I2S or GPIO. We should set it as GPIO if we use GPIO5 as DMIC1 data pin. Signed-off-by: Bard Liao Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index e9cc3aa..961bd7e 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, break; case RT5645_DMIC_DATA_GPIO5: + regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, + RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO); regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5); regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h index 0353a6a..278bb9f 100644 --- a/sound/soc/codecs/rt5645.h +++ b/sound/soc/codecs/rt5645.h @@ -1693,6 +1693,10 @@ #define RT5645_GP6_PIN_SFT 6 #define RT5645_GP6_PIN_GPIO6 (0x0 << 6) #define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6) +#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4) +#define RT5645_I2S2_DAC_PIN_SFT 4 +#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4) +#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4) #define RT5645_GP8_PIN_MASK (0x1 << 3) #define RT5645_GP8_PIN_SFT 3 #define RT5645_GP8_PIN_GPIO8 (0x0 << 3) -- cgit v0.10.2 From 3576fd794b38306e196498ac54bb3b21c32e1ae4 Mon Sep 17 00:00:00 2001 From: Glenn Griffin Date: Mon, 3 Aug 2015 09:56:54 -0700 Subject: openvswitch: Fix L4 checksum handling when dealing with IP fragments openvswitch modifies the L4 checksum of a packet when modifying the ip address. When an IP packet is fragmented only the first fragment contains an L4 header and checksum. Prior to this change openvswitch would modify all fragments, modifying application data in non-first fragments, causing checksum failures in the reassembled packet. Signed-off-by: Glenn Griffin Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 8a8c0b8..ee34f47 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, return 0; } -static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) +static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, + __be32 addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); + if (nh->frag_off & htons(IP_OFFSET)) + return; + if (nh->protocol == IPPROTO_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, - *addr, new_addr, 1); + addr, new_addr, 1); } else if (nh->protocol == IPPROTO_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace4(&uh->check, skb, - *addr, new_addr, 1); + addr, new_addr, 1); if (!uh->check) uh->check = CSUM_MANGLED_0; } } } +} +static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, + __be32 *addr, __be32 new_addr) +{ + update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); *addr = new_addr; -- cgit v0.10.2 From c20bc5502d151ca731ebc672b95aa77e2bf32c8c Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 30 Jul 2015 11:01:13 -0700 Subject: Input: turbografx - fix potential out of bound access Patch 17dd3f0f7aa7: "[PATCH] drivers/input/joystick: convert to dynamic input_dev allocation" from Sep 15, 2005, leads to the following static checker warning: drivers/input/joystick/turbografx.c:235 tgfx_probe() error: buffer overflow 'tgfx_buttons' 5 <= 5 drivers/input/joystick/turbografx.c 195 for (i = 0; i < n_devs; i++) { 196 if (n_buttons[i] < 1) 197 continue; 198 199 if (n_buttons[i] > 6) { ^^^^^^^^^^^^^^^^ Possibly off by one. >= 6. Let's change the upper value to ARRAY_SIZE(tgfx_buttons) to ensure we do not reach past the end of the array. Reported-by: Dan Carpenter Reviewed-by: Dan Carpenter Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 27b6a3c..891797a 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) if (n_buttons[i] < 1) continue; - if (n_buttons[i] > 6) { + if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) { printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); err = -EINVAL; goto err_unreg_devs; -- cgit v0.10.2 From b6e26546cc08e870da5413a3fcccc100eb2192c6 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 3 Aug 2015 13:29:29 -0700 Subject: Input: axp20x-pek - add module alias Add a proper module alias so the driver can be autoloaded when the parent axp20x mfd driver registers its cells. 
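For illustration only, a minimal platform-driver skeleton (the "example-cell" name is hypothetical and not from this patch) showing the mechanism the commit relies on: the platform bus emits a MODALIAS=platform:<name> uevent for each mfd cell the parent driver registers, and modprobe can only resolve that uevent if some module exports a matching MODULE_ALIAS.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_cell_probe(struct platform_device *pdev)
{
	/* driver setup would go here */
	return 0;
}

static struct platform_driver example_cell_driver = {
	.probe	= example_cell_probe,
	.driver	= {
		.name	= "example-cell",	/* must match the mfd cell name */
	},
};
module_platform_driver(example_cell_driver);

MODULE_LICENSE("GPL");
/* without this alias the "MODALIAS=platform:example-cell" uevent loads nothing */
MODULE_ALIAS("platform:example-cell");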
Signed-off-by: Chen-Yu Tsai Acked-by: Carlo Caione Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 10e140a..1ac898d 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver); MODULE_DESCRIPTION("axp20x Power Button"); MODULE_AUTHOR("Carlo Caione "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:axp20x-pek"); -- cgit v0.10.2 From 073e570d7c2caae9910a993d56f340be4548a4a8 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 3 Aug 2015 14:06:24 -0700 Subject: Input: alps - only Dell laptops have separate button bits for v2 dualpoint sticks It turns out that only Dell laptops have the separate button bits for v2 dualpoint sticks and that commit 92bac83dd79e ("Input: alps - non interleaved V2 dualpoint has separate stick button bits") causes regressions on Toshiba laptops. This commit adds a check for Dell laptops to the code for handling these extra button bits, fixing this regression. This patch has been tested on a Dell Latitude D620 to make sure that it does not reintroduce the original problem. Reported-and-tested-by: Douglas Christman Cc: stable@vger.kernel.org Signed-off-by: Hans de Goede Signed-off-by: Dmitry Torokhov diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt index e301887d..765d99c 100644 --- a/Documentation/input/alps.txt +++ b/Documentation/input/alps.txt @@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for -the DualPoint Stick. For non interleaved dualpoint devices the pointingstick -buttons get reported separately in the PSM, PSR and PSL bits. +the DualPoint Stick. The M, R and L bits signal the combined status of both +the pointingstick and touchpad buttons, except for Dell dualpoint devices +where the pointingstick buttons get reported separately in the PSM, PSR +and PSL bits. 
Dualpoint device -- interleaved packet format --------------------------------------------- diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 113d6f1..4d24686 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "psmouse.h" #include "alps.h" @@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ +#define ALPS_DELL 0x100 /* device is a Dell laptop */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { @@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) return; } - /* Non interleaved V2 dualpoint has separate stick button bits */ + /* Dell non interleaved V2 dualpoint has separate stick button bits */ if (priv->proto_version == ALPS_PROTO_V2 && - priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { + priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) { left |= packet[0] & 1; right |= packet[0] & 2; middle |= packet[0] & 4; @@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse, priv->byte0 = protocol->byte0; priv->mask0 = protocol->mask0; priv->flags = protocol->flags; + if (dmi_name_in_vendors("Dell")) + priv->flags |= ALPS_DELL; priv->x_max = 2000; priv->y_max = 1400; -- cgit v0.10.2 From 636dba8e12d797357b2063981476390f11262c08 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Thu, 30 Jul 2015 17:12:20 -0700 Subject: act_mirred: avoid calling tcf_hash_release() when binding When we share an action within a filter, the bind refcnt should increase, therefore we should not call tcf_hash_release(). Cc: Jamal Hadi Salim Cc: Daniel Borkmann Signed-off-by: Cong Wang Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index a42a3b2..2685450 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, return ret; ret = ACT_P_CREATED; } else { + if (bind) + return 0; if (!ovr) { tcf_hash_release(a, bind); return -EEXIST; -- cgit v0.10.2 From 7895086afde2a05fa24a0e410d8e6b75ca7c8fdd Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Mon, 3 Aug 2015 16:07:48 +0300 Subject: xhci: fix off by one error in TRB DMA address boundary check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to check that a TRB is part of the current segment before calculating its DMA address. Previously a ring segment didn't use a full memory page, and every new ring segment got a new memory page, so the off by one error in checking the upper bound was never seen. Now that we use a full memory page, 256 TRBs (4096 bytes), the off by one didn't catch the case when a TRB was the first element of the next segment. This is triggered if the virtual memory pages for a ring segment are next to each in increasing order where the ring buffer wraps around and causes errors like: [ 106.398223] xhci_hcd 0000:00:14.0: ERROR Transfer event TRB DMA ptr not part of current TD ep_index 0 comp_code 1 [ 106.398230] xhci_hcd 0000:00:14.0: Looking for event-dma fffd3000 trb-start fffd4fd0 trb-end fffd5000 seg-start fffd4000 seg-end fffd4ff0 The trb-end address is one outside the end-seg address. 
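A standalone sketch of the boundary check described above (the TRBS_PER_SEGMENT value of 256 is taken from the commit message; the helper name is made up): valid TRB offsets inside one segment are 0..255, so an offset of exactly 256, the first TRB of the next segment, must be rejected, which a ">" comparison fails to do.

#include <stdbool.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 256

/* valid TRB offsets inside one segment are 0 .. TRBS_PER_SEGMENT - 1 */
static bool offset_in_segment(long segment_offset)
{
	return segment_offset >= 0 && segment_offset < TRBS_PER_SEGMENT;
}

int main(void)
{
	printf("offset 255 in segment: %d\n", offset_in_segment(255)); /* 1: last TRB of the segment */
	printf("offset 256 in segment: %d\n", offset_in_segment(256)); /* 0: first TRB of the next segment */
	return 0;
}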
Cc: Tested-by: Arkadiusz Miśkiewicz Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6a8fc52..32f4d56 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, return 0; /* offset in TRBs */ segment_offset = trb - seg->trbs; - if (segment_offset > TRBS_PER_SEGMENT) + if (segment_offset >= TRBS_PER_SEGMENT) return 0; return seg->dma + (segment_offset * sizeof(*trb)); } -- cgit v0.10.2 From ffe5adcb7661d94e952d6b5ed7f493cb4ef0c7bc Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 3 Aug 2015 16:07:49 +0300 Subject: drivers/usb: Delete XHCI command timer if necessary When xhci_mem_cleanup() is called, it's possible that the command timer isn't initialized and scheduled. For those cases, to delete the command timer causes soft-lockup as below stack dump shows. The patch avoids deleting the command timer if it's not scheduled with the help of timer_pending(). NMI watchdog: BUG: soft lockup - CPU#40 stuck for 23s! [kworker/40:1:8140] : NIP [c000000000150b30] lock_timer_base.isra.34+0x90/0xa0 LR [c000000000150c24] try_to_del_timer_sync+0x34/0xa0 Call Trace: [c000000f67c975e0] [c0000000015b84f8] mon_ops+0x0/0x8 (unreliable) [c000000f67c97620] [c000000000150c24] try_to_del_timer_sync+0x34/0xa0 [c000000f67c97660] [c000000000150cf0] del_timer_sync+0x60/0x80 [c000000f67c97690] [c00000000070ac0c] xhci_mem_cleanup+0x5c/0x5e0 [c000000f67c97740] [c00000000070c2e8] xhci_mem_init+0x1158/0x13b0 [c000000f67c97860] [c000000000700978] xhci_init+0x88/0x110 [c000000f67c978e0] [c000000000701644] xhci_gen_setup+0x2b4/0x590 [c000000f67c97970] [c0000000006d4410] xhci_pci_setup+0x40/0x190 [c000000f67c979f0] [c0000000006b1af8] usb_add_hcd+0x418/0xba0 [c000000f67c97ab0] [c0000000006cb15c] usb_hcd_pci_probe+0x1dc/0x5c0 [c000000f67c97b50] [c0000000006d3ba4] xhci_pci_probe+0x64/0x1f0 [c000000f67c97ba0] [c0000000004fe9ac] local_pci_probe+0x6c/0x130 [c000000f67c97c30] [c0000000000e5ce8] work_for_cpu_fn+0x38/0x60 [c000000f67c97c60] [c0000000000eacb8] process_one_work+0x198/0x470 [c000000f67c97cf0] [c0000000000eb6ac] worker_thread+0x37c/0x5a0 [c000000f67c97d80] [c0000000000f2730] kthread+0x110/0x130 [c000000f67c97e30] [c000000000009660] ret_from_kernel_thread+0x5c/0x7c Cc: Reported-by: Priya M. A Signed-off-by: Gavin Shan Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 3e442f7..9a8c936 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) int size; int i, j, num_ports; - del_timer_sync(&xhci->cmd_timer); + if (timer_pending(&xhci->cmd_timer)) + del_timer_sync(&xhci->cmd_timer); /* Free the Event Ring Segment Table and the actual Event Ring */ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); -- cgit v0.10.2 From 468b732b6f76b138c0926eadf38ac88467dcd271 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 1 Aug 2015 15:33:26 +0300 Subject: rds: fix an integer overflow test in rds_info_getsockopt() "len" is a signed integer. We check that len is not negative, so it goes from zero to INT_MAX. PAGE_SIZE is unsigned long so the comparison is type promoted to unsigned long. ULONG_MAX - 4095 is a higher than INT_MAX so the condition can never be true. I don't know if this is harmful but it seems safe to limit "len" to INT_MAX - 4095. 
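A standalone sketch of the promotion problem described above (values only, not the rds code): because PAGE_SIZE is unsigned long, both the addition and the comparison are carried out in unsigned long, so the old wrap test can never fire for a non-negative int, while bounding len against INT_MAX does reject the overflowing case.

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* unsigned long, as in the kernel */

int main(void)
{
	int len = INT_MAX;	/* a length that must be rejected */

	/* promoted to unsigned long: INT_MAX + 4095 is still larger than len, never true */
	int old_test = (len + PAGE_SIZE - 1 < len);

	/* the replacement bound from the patch catches it */
	int new_test = (len > INT_MAX - PAGE_SIZE + 1);

	printf("old wrap test fires: %d\n", old_test);	/* 0 */
	printf("new bound fires:     %d\n", new_test);	/* 1 */
	return 0;
}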
Fixes: a8c879a7ee98 ('RDS: Info and stats') Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller diff --git a/net/rds/info.c b/net/rds/info.c index 9a6b4f6..140a44a 100644 --- a/net/rds/info.c +++ b/net/rds/info.c @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, /* check for all kinds of wrapping and the like */ start = (unsigned long)optval; - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { ret = -EINVAL; goto out; } -- cgit v0.10.2 From 2fc09962e24ace45154d0c16024f1eb15700f3e8 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 3 Aug 2015 11:18:12 +0800 Subject: 3c59x: Fix resource leaks in vortex_open When vortex_up is failed, the skb buffers allocated by __netdev_alloc_skb in vortex_open are not released, which may cause resource leaks. This bug has been submitted before. This patch modifies the error handling code to fix it. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2d1ce3c..753887d 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev) vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { - int j; pr_emerg("%s: no memory for rx ring\n", dev->name); - for (j = 0; j < i; j++) { - if (vp->rx_skbuff[j]) { - dev_kfree_skb(vp->rx_skbuff[j]); - vp->rx_skbuff[j] = NULL; - } - } retval = -ENOMEM; - goto err_free_irq; + goto err_free_skb; } /* Wrap the ring. */ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); @@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev) if (!retval) goto out; -err_free_irq: +err_free_skb: + for (i = 0; i < RX_RING_SIZE; i++) { + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } free_irq(dev->irq, dev); err: if (vortex_debug > 1) -- cgit v0.10.2 From 1f17124006b65482d9084c01e252b59dbca8db8f Mon Sep 17 00:00:00 2001 From: Malcolm Priestley Date: Sun, 2 Aug 2015 12:34:46 +0100 Subject: staging: vt6655: vnt_bss_info_changed check conf->beacon_rate is not NULL conf->beacon_rate can be NULL on association. So check conf->beacon_rate BSS_CHANGED_BEACON_INFO needs to flagged in changed as the beacon_rate will appear later. 
Signed-off-by: Malcolm Priestley Cc: # v3.19+ Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index b0c8e23..69bdc8f 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { - if (conf->assoc) { + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) && + priv->op_mode != NL80211_IFTYPE_AP) { + if (conf->assoc && conf->beacon_rate) { CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, conf->sync_tsf); -- cgit v0.10.2 From bd4aaf8f9b85d6b2df3231fd62b219ebb75d3568 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 3 Aug 2015 09:54:58 -0400 Subject: dm: fix dm_merge_bvec regression on 32 bit systems A DM regression on 32 bit systems was reported against v4.2-rc3 here: https://lkml.org/lkml/2015/7/29/401 Fix this by reverting both commit 1c220c69 ("dm: fix casting bug in dm_merge_bvec()") and 148e51ba ("dm: improve documentation and code clarity in dm_merge_bvec"). This combined revert is done to eliminate the possibility of a partial revert in stable@ kernels. In hindsight the correct fix, at the time 1c220c69 was applied to fix the regression that 148e51ba introduced, should've been to simply revert 148e51ba. Reported-by: Josh Boyer Tested-by: Adam Williamson Acked-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # 3.19+ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ab37ae1..0d7ab20 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors, max_size = 0; + sector_t max_sectors; + int max_size = 0; if (unlikely(!map)) goto out; @@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q, * Find maximum amount of I/O that won't need splitting */ max_sectors = min(max_io_len(bvm->bi_sector, ti), - (sector_t) queue_max_sectors(q)); + (sector_t) BIO_MAX_SECTORS); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - - /* - * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t - * to the targets' merge function since it holds sectors not bytes). - * Just doing this as an interim fix for stable@ because the more - * comprehensive cleanup of switching to sector_t will impact every - * DM target that implements a ->merge hook. - */ - if (max_size > INT_MAX) - max_size = INT_MAX; + if (max_size < 0) + max_size = 0; /* * merge_bvec_fn() returns number of bytes @@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); + max_size = ti->type->merge(ti, bvm, biovec, max_size); /* * If the target doesn't support merge method and some of the devices - * provided their merge_bvec method (we know this by looking for the - * max_hw_sectors that dm_set_device_limits may set), then we can't - * allow bios with multiple vector entries. So always set max_size - * to 0, and the code below allows just one page. + * provided their merge_bvec method (we know this by looking at + * queue_max_hw_sectors), then we can't allow bios with multiple vector + * entries. So always set max_size to 0, and the code below allows + * just one page. 
*/ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) max_size = 0; -- cgit v0.10.2 From 6de7abfbad1c6a45893a47a17c2ac91b551aa90d Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 3 Aug 2015 18:27:56 +0530 Subject: ARCv2: [axs103_smp] Reduce clk for Quad FPGA configs Signed-off-by: Vineet Gupta diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 99f7da5..e7769c3 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od) static void __init axs103_early_init(void) { + /* + * AXS103 configurations for SMP/QUAD configurations share device tree + * which defaults to 90 MHz. However recent failures of Quad config + * revealed P&R timing violations so clamp it down to safe 50 MHz + * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack + * + * This hack is really hacky as of now. Fix it properly by getting the + * number of cores as return value of platform's early SMP callback + */ +#ifdef CONFIG_ARC_MCIP + unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; + if (num_cores > 2) + arc_set_core_freq(50 * 1000000); +#endif + switch (arc_get_core_freq()/1000000) { case 33: axs103_set_freq(1, 1, 1); -- cgit v0.10.2 From f5959cb0c34c5c7f9f387e5d8fe1ec831ccb42ac Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 29 Jul 2015 19:20:58 +0530 Subject: Revert "ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock" Extended testing of quad core configuration revealed that this fix was insufficient. Specifically LTP open posix shm_op/23-1 would cause the hardware livelock in llock/scond loop in update_cpu_load_active() So remove this and make way for a proper workaround This reverts commit a5c8b52abe677977883655166796f167ef1e0084. Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03484cb..20b7dc1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -23,21 +23,13 @@ #define atomic_set(v, i) (((v)->counter) = (i)) -#ifdef CONFIG_ISA_ARCV2 -#define PREFETCHW " prefetchw [%1] \n" -#else -#define PREFETCHW -#endif - #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ unsigned int temp; \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ + "1: llock %0, [%1] \n" \ " " #asm_op " %0, %0, %2 \n" \ " scond %0, [%1] \n" \ " bnz 1b \n" \ @@ -58,9 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ smp_mb(); \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ + "1: llock %0, [%1] \n" \ " " #asm_op " %0, %0, %2 \n" \ " scond %0, [%1] \n" \ " bnz 1b \n" \ -- cgit v0.10.2 From 8ac0665fb6ff125efc4df08772317f7311a95ce5 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 21 Jul 2015 12:05:42 +0300 Subject: ARC: refactor atomic inline asm operands with symbolic names This reduces the diff in forth-coming patches and also helps understand better the incremental changes to inline asm. 
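The feature being adopted here is GCC/Clang named asm operands. A host-side sketch (x86-64 chosen only for illustration, not ARC code) of the syntax: operands are referenced as %[name] in the template and declared as [name] "constraint" (expr) in the operand lists, so adding an operand in a later patch does not renumber every %0/%1 reference.

    /* Minimal example assuming GCC/Clang extended asm on x86-64 */
    static inline long add_named(long a, long b)
    {
        long res;

        __asm__ ("lea (%[a],%[b]), %[res]"
                 : [res] "=r" (res)
                 : [a] "r" (a), [b] "r" (b));
        return res;
    }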
Acked-by: Peter Zijlstra (Intel) Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 20b7dc1..3dd36c1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -26,22 +26,23 @@ #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ \ __asm__ __volatile__( \ - "1: llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " bnz 1b \n" \ + : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ + : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ + [i] "ir" (i) \ : "cc"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -50,17 +51,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ smp_mb(); \ \ __asm__ __volatile__( \ - "1: llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " bnz 1b \n" \ + : [val] "=&r" (val) \ + : [ctr] "r" (&v->counter), \ + [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ - return temp; \ + return val; \ } #else /* !CONFIG_ARC_HAS_LLSC */ -- cgit v0.10.2 From ae7eae9e031206bde8645d038c76ad89e7d3fbcb Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 14 Jul 2015 17:55:05 +0530 Subject: ARC: LLOCK/SCOND based spin_lock Current spin_lock uses EXchange instruction to implement the atomic test and set of lock location (reads orig value and ST 1). This however forces the cacheline into exclusive state (because of the ST) and concurrent loops in multiple cores will bounce the line around between cores. Instead, use LLOCK/SCOND to implement the atomic test and set which is better as line is in shared state while lock is spinning on LLOCK The real motivation of this change however is to make way for future changes in atomics to implement delayed retry (with backoff). Initial experiment with delayed retry in atomics combined with orig EX based spinlock was a total disaster (broke even LMBench) as struct sock has a cache line sharing an atomic_t and spinlock. The tight spinning on lock, caused the atomic retry to keep backing off such that it would never finish. 
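A portable sketch of the locking idea using GCC __atomic builtins rather than LLOCK/SCOND (all names below are made up): spin with plain loads while the lock looks taken, and only attempt the conditional store once it looks free, so waiters keep the cacheline in shared state instead of forcing it exclusive on every poll the way an unconditional exchange does.

    /* Simplified model, not the ARC implementation */
    typedef struct { unsigned int slock; } sketch_spinlock_t;

    #define SKETCH_UNLOCKED 0u
    #define SKETCH_LOCKED   1u

    static inline void sketch_spin_lock(sketch_spinlock_t *lock)
    {
        unsigned int expected;

        for (;;) {
            /* read-only spin: line stays shared among waiters */
            while (__atomic_load_n(&lock->slock, __ATOMIC_RELAXED) == SKETCH_LOCKED)
                ;

            expected = SKETCH_UNLOCKED;
            /* analogous to a successful SCOND */
            if (__atomic_compare_exchange_n(&lock->slock, &expected, SKETCH_LOCKED,
                                            0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return;
        }
    }

    static inline void sketch_spin_unlock(sketch_spinlock_t *lock)
    {
        __atomic_store_n(&lock->slock, SKETCH_UNLOCKED, __ATOMIC_RELEASE);
    }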
Acked-by: Peter Zijlstra (Intel) Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index e1651df..4f6c90a 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -18,9 +18,68 @@ #define arch_spin_unlock_wait(x) \ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) +#ifdef CONFIG_ARC_HAS_LLSC + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " mov %[got_it], 1 \n" + "4: \n" + " \n" + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +#else /* !CONFIG_ARC_HAS_LLSC */ + static inline void arch_spin_lock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; /* * This smp_mb() is technically superfluous, we only need the one @@ -33,7 +92,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" - : "+&r" (tmp) + : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); @@ -48,26 +107,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) smp_mb(); } +/* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); smp_mb(); - return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); + return (val == __ARCH_SPIN_LOCK_UNLOCKED__); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__; /* * RELEASE barrier: given the instructions avail on ARCv2, full barrier @@ -77,7 +137,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); @@ -88,6 +148,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) smp_mb(); } +#endif + /* * Read-write spinlocks, allowing multiple readers but only one writer. * -- cgit v0.10.2 From 69cbe630f54ec02efe47fdb9e257e617161da370 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 16 Jul 2015 10:31:45 +0530 Subject: ARC: LLOCK/SCOND based rwlock With LLOCK/SCOND, the rwlock counter can be atomically updated w/o need for a guarding spin lock. 
This in turn elides the EXchange instruction based spinning which causes the cacheline transition to exclusive state and concurrent spinning across cores would cause the line to keep bouncing around. LLOCK/SCOND based implementation is superior as spinning on LLOCK keeps the cacheline in shared state. Acked-by: Peter Zijlstra (Intel) Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 4f6c90a..9fd5a02 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -75,6 +75,164 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) smp_mb(); } +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. + * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). 
+ * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + smp_mb(); + + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + + smp_mb(); +} + #else /* !CONFIG_ARC_HAS_LLSC */ static inline void arch_spin_lock(arch_spinlock_t *lock) @@ -148,23 +306,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) smp_mb(); } -#endif - /* * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) * * The spinlock itself is contained in @counter and access to it is * serialized with @lock_mutex. - * - * Unfair locking as Writers could be starved indefinitely by Reader(s) */ -/* Would read_trylock() succeed? */ -#define arch_read_can_lock(x) ((x)->counter > 0) - -/* Would write_trylock() succeed? 
*/ -#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) - /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { @@ -235,6 +384,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&(rw->lock_mutex)); } +#endif + +#define arch_read_can_lock(x) ((x)->counter > 0) +#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h index 662627c..4e1ef5f 100644 --- a/arch/arc/include/asm/spinlock_types.h +++ b/arch/arc/include/asm/spinlock_types.h @@ -26,7 +26,9 @@ typedef struct { */ typedef struct { volatile unsigned int counter; +#ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t lock_mutex; +#endif } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 -- cgit v0.10.2 From e78fdfef84be13a5c2b8276e12203cdf24778596 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 14 Jul 2015 19:50:18 +0530 Subject: ARCv2: spinlock/rwlock/atomics: Delayed retry of failed SCOND with exponential backoff This is to workaround the llock/scond livelock HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping coherency transactions in the SCU. The exclusive line state keeps rotating among contenting cores leading to a never ending cycle. So break the cycle by deferring the retry of failed exclusive access (SCOND). The actual delay needed is function of number of contending cores as well as the unrelated coherency traffic from other cores. To keep the code simple, start off with small delay of 1 which would suffice most cases and in case of contention double the delay. Eventually the delay is sufficient such that the coherency pipeline is drained, thus a subsequent exclusive access would succeed. 
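The retry policy itself can be sketched in plain C (a compare-and-swap stands in for SCOND; the helper names are made up, this is not the ARC asm): on a failed conditional store, spin for 'delay' iterations, double the delay, and restart the whole load/op/store sequence.

    #include <stdbool.h>

    /* Stand-in for SCOND using a compare-and-swap; not the ARC instruction */
    static inline bool store_cond(int *ctr, int oldv, int newv)
    {
        return __atomic_compare_exchange_n(ctr, &oldv, newv, false,
                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    }

    static inline void atomic_add_backoff(int i, int *ctr)
    {
        unsigned int delay = 1, t;

        for (;;) {
            int val = __atomic_load_n(ctr, __ATOMIC_RELAXED);  /* ~ LLOCK */

            if (store_cond(ctr, val, val + i))                 /* ~ SCOND */
                break;

            for (t = delay; t; t--)                 /* spin out the delay */
                __asm__ volatile("" ::: "memory");  /* keep the loop alive */

            delay *= 2;                             /* exponential backoff */
            if (!delay)
                delay = 1;                          /* overflow, like the asm's mov.z */
        }
    }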
Link: http://lkml.kernel.org/r/1438612568-28265-1-git-send-email-vgupta@synopsys.com Acked-by: Peter Zijlstra (Intel) Signed-off-by: Vineet Gupta diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index a5fccdf..bd4670d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -365,6 +365,11 @@ config ARC_HAS_LLSC default y depends on !ARC_CANT_LLSC +config ARC_STAR_9000923308 + bool "Workaround for llock/scond livelock" + default y + depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC + config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" default y diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 3dd36c1..629dfd0 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -23,17 +23,51 @@ #define atomic_set(v, i) (((v)->counter) = (i)) +#ifdef CONFIG_ARC_STAR_9000923308 + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay = 1, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " bz 4f \n" \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " asl.f %[delay], %[delay], 1 \n" /* delay *= 2 */ \ + " mov.z %[delay], 1 \n" /* handle overflow */ \ + " b 1b \n" /* start over */ \ + "4: ; --- success --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ + +#else /* !CONFIG_ARC_STAR_9000923308 */ + +#define SCOND_FAIL_RETRY_VAR_DEF + +#define SCOND_FAIL_RETRY_ASM \ + " bnz 1b \n" \ + +#define SCOND_FAIL_RETRY_VARS + +#endif + #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ - unsigned int val; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ __asm__ __volatile__( \ "1: llock %[val], [%[ctr]] \n" \ " " #asm_op " %[val], %[val], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ - " bnz 1b \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ + SCOND_FAIL_RETRY_VARS \ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ [i] "ir" (i) \ : "cc"); \ @@ -42,7 +76,8 @@ static inline void atomic_##op(int i, atomic_t *v) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ - unsigned int val; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -54,8 +89,11 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ "1: llock %[val], [%[ctr]] \n" \ " " #asm_op " %[val], %[val], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ - " bnz 1b \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ : [val] "=&r" (val) \ + SCOND_FAIL_RETRY_VARS \ : [ctr] "r" (&v->counter), \ [i] "ir" (i) \ : "cc"); \ @@ -142,6 +180,9 @@ ATOMIC_OP(and, &=, and) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS /** * __atomic_add_unless - add unless the number is a given value diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 9fd5a02..2a84525 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -20,6 +20,11 @@ #ifdef CONFIG_ARC_HAS_LLSC +/* + * A normal LLOCK/SCOND based system, w/o need for livelock workaround + */ +#ifndef CONFIG_ARC_STAR_9000923308 + static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned int val; @@ -233,6 +238,294 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) smp_mb(); } 
+#else /* CONFIG_ARC_STAR_9000923308 */ + +/* + * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping + * coherency transactions in the SCU. The exclusive line state keeps rotating + * among contenting cores leading to a never ending cycle. So break the cycle + * by deferring the retry of failed exclusive access (SCOND). The actual delay + * needed is function of number of contending cores as well as the unrelated + * coherency traffic from other cores. To keep the code simple, start off with + * small delay of 1 which would suffice most cases and in case of contention + * double the delay. Eventually the delay is sufficient such that the coherency + * pipeline is drained, thus a subsequent exclusive access would succeed. + */ + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " asl.f %[delay], %[delay], 1 \n" /* delay *= 2 */ \ + " mov.z %[delay], 1 \n" /* handle overflow */ \ + " b 1b \n" /* start over */ \ + " \n" \ + "4: ; --- done --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. 
+ * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). + * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " scond %[UNLOCKED], [%[rwlock]]\n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : 
[rwlock] "r" (&(rw->counter)), + [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS + +#endif /* CONFIG_ARC_STAR_9000923308 */ + #else /* !CONFIG_ARC_HAS_LLSC */ static inline void arch_spin_lock(arch_spinlock_t *lock) diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index f2f771b..cabde9d 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -336,6 +336,10 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); + + if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) + panic("llock/scond livelock workaround missing\n"); } /* -- cgit v0.10.2 From b89aa12c177477e34caa722818536fb5d0bffd76 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 21 Jul 2015 18:46:53 +0300 Subject: ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle The previous commit for delayed retry of SCOND needs some fine tuning for spin locks. The backoff from delayed retry in conjunction with spin looping of lock itself can potentially cause the delay counter to reach high values. So to provide fairness to any lock operation, after a lock "seems" available (i.e. just before first SCOND try0, reset the delay counter back to starting value of 1 Essentially reset delay to 1 for a new spin-wait-loop-acquire cycle. Acked-by: Peter Zijlstra (Intel) Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 2a84525..7071fc0 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -279,7 +279,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "0: mov %[delay], 1 \n" "1: llock %[val], [%[slock]] \n" - " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ " scond %[LOCKED], [%[slock]] \n" /* acquire */ " bz 4f \n" /* done */ " \n" @@ -358,7 +358,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) __asm__ __volatile__( "0: mov %[delay], 1 \n" "1: llock %[val], [%[rwlock]] \n" - " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ " sub %[val], %[val], 1 \n" /* reader lock */ " scond %[val], [%[rwlock]] \n" " bz 4f \n" /* done */ @@ -427,7 +427,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) __asm__ __volatile__( "0: mov %[delay], 1 \n" "1: llock %[val], [%[rwlock]] \n" - " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ " mov %[val], %[WR_LOCKED] \n" " scond %[val], [%[rwlock]] \n" " bz 4f \n" -- cgit v0.10.2 From 9b06dc939489152b583131f49929ed1c6ae83740 Mon Sep 17 00:00:00 2001 From: Jeeja KP Date: Tue, 4 Aug 2015 09:28:38 +0530 Subject: ALSA: HDA: Fix stream assignment for host in decoupled mode This fixes issue in assigning host stream in case of decoupled mode. 
The check to verify if the stream is already in use was wrong so fix that Signed-off-by: Jeeja KP Signed-off-by: Vinod Koul Signed-off-by: Takashi Iwai diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index f8ffbdb..3de47dd 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c @@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus, if (stream->direction != substream->stream) continue; - if (stream->opened) { + if (!stream->opened) { if (!hstream->decoupled) snd_hdac_ext_stream_decouple(ebus, hstream, true); res = hstream; -- cgit v0.10.2 From 5d942ce63c8fd98794a8ba9af559925c8432a052 Mon Sep 17 00:00:00 2001 From: Jeeja KP Date: Tue, 4 Aug 2015 09:28:39 +0530 Subject: ALSA: HDA: Dont check return for snd_hdac_chip_readl The snd_hdac_chip_readl return can never be less than zeros, so no point in checking for the return value This fixes following static checker warnings in snd_hdac_ext_bus_parse_capabilities sound/hda/ext/hdac_ext_controller.c:47 snd_hdac_ext_bus_parse_capabilities() warn: unsigned 'offset' is never less than zero. sound/hda/ext/hdac_ext_controller.c:54 snd_hdac_ext_bus_parse_capabilities() warn: unsigned 'cur_cap' is never less than zero. Signed-off-by: Jeeja KP Reported-by: Dan Carpenter Signed-off-by: Vinod Koul Signed-off-by: Takashi Iwai diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c index b2da19b..358f161 100644 --- a/sound/hda/ext/hdac_ext_controller.c +++ b/sound/hda/ext/hdac_ext_controller.c @@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus) offset = snd_hdac_chip_readl(bus, LLCH); - if (offset < 0) - return -EIO; - /* Lets walk the linked capabilities list */ do { cur_cap = _snd_hdac_chip_read(l, bus, offset); - if (cur_cap < 0) - return -EIO; - dev_dbg(bus->dev, "Capability version: 0x%x\n", ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF)); -- cgit v0.10.2 From 10e2eb878f3ca07ac2f05fa5ca5e6c4c9174a27a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 1 Aug 2015 12:14:33 +0200 Subject: udp: fix dst races with multicast early demux MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Multicast dst are not cached. They carry DST_NOCACHE. As mentioned in commit f8864972126899 ("ipv4: fix dst race in sk_dst_get()"), these dst need special care before caching them into a socket. Caching them is allowed only if their refcnt was not 0, ie we must use atomic_inc_not_zero() Also, we must use READ_ONCE() to fetch sk->sk_rx_dst, as mentioned in commit d0c294c53a771 ("tcp: prevent fetching dst twice in early demux code") Fixes: 421b3885bf6d ("udp: ipv4: Add udp early demux") Tested-by: Gregory Hoggarth Signed-off-by: Eric Dumazet Reported-by: Gregory Hoggarth Reported-by: Alex Gartrell Cc: Michal Kubeček Signed-off-by: David S. 
Miller diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 83aa604..1b8c5ba 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = sk->sk_rx_dst; + dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); - if (dst) - skb_dst_set_noref(skb, dst); + if (dst) { + /* DST_NOCACHE can not be used without taking a reference */ + if (dst->flags & DST_NOCACHE) { + if (likely(atomic_inc_not_zero(&dst->__refcnt))) + skb_dst_set(skb, dst); + } else { + skb_dst_set_noref(skb, dst); + } + } } int udp_rcv(struct sk_buff *skb) -- cgit v0.10.2 From 2475b22526d70234ecfe4a1ff88aed69badefba9 Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Mon, 3 Aug 2015 15:38:03 +0100 Subject: xen-netback: Allocate fraglist early to avoid complex rollback Determine if a fraglist is needed in the tx path, and allocate it if necessary before setting up the copy and map operations. Otherwise, undoing the copy and map operations is tricky. This fixes a use-after-free: if allocating the fraglist failed, the copy and map operations that had been set up were still executed, writing over the data area of a freed skb. Signed-off-by: Ross Lagerwall Signed-off-by: David S. Miller diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 7d50711..1b406e7 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, struct sk_buff *skb, struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop) + struct gnttab_map_grant_ref *gop, + unsigned int frag_overflow, + struct sk_buff *nskb) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; int start; pending_ring_idx_t index; - unsigned int nr_slots, frag_overflow = 0; + unsigned int nr_slots; - /* At this point shinfo->nr_frags is in fact the number of - * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. - */ - if (shinfo->nr_frags > MAX_SKB_FRAGS) { - frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; - BUG_ON(frag_overflow > MAX_SKB_FRAGS); - shinfo->nr_frags = MAX_SKB_FRAGS; - } nr_slots = shinfo->nr_frags; /* Skip first skb fragment if it is on same page as header fragment. 
*/ @@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que } if (frag_overflow) { - struct sk_buff *nskb = xenvif_alloc_skb(0); - if (unlikely(nskb == NULL)) { - if (net_ratelimit()) - netdev_err(queue->vif->dev, - "Can't allocate the frag_list skb.\n"); - return NULL; - } shinfo = skb_shinfo(nskb); frags = shinfo->frags; @@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; - struct sk_buff *skb; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops; + struct sk_buff *skb, *nskb; int ret; + unsigned int frag_overflow; while (skb_queue_len(&queue->tx_queue) < budget) { struct xen_netif_tx_request txreq; @@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, break; } + skb_shinfo(skb)->nr_frags = ret; + if (data_len < txreq.size) + skb_shinfo(skb)->nr_frags++; + /* At this point shinfo->nr_frags is in fact the number of + * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. + */ + frag_overflow = 0; + nskb = NULL; + if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { + frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; + BUG_ON(frag_overflow > MAX_SKB_FRAGS); + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; + nskb = xenvif_alloc_skb(0); + if (unlikely(nskb == NULL)) { + kfree_skb(skb); + xenvif_tx_err(queue, &txreq, idx); + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Can't allocate the frag_list skb.\n"); + break; + } + } + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; @@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. */ kfree_skb(skb); + kfree_skb(nskb); break; } } @@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, (*copy_ops)++; - skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { - skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); @@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, queue->pending_cons++; - request_gop = xenvif_get_requests(queue, skb, txfrags, gop); - if (request_gop == NULL) { - kfree_skb(skb); - xenvif_tx_err(queue, &txreq, idx); - break; - } - gop = request_gop; + gop = xenvif_get_requests(queue, skb, txfrags, gop, + frag_overflow, nskb); __skb_queue_tail(&queue->tx_queue, skb); -- cgit v0.10.2 From 3b8a684bd6cbc13dfd21ca41814c304e9f27ec7f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 3 Aug 2015 17:24:08 +0200 Subject: drm/atomic-helper: Add an atomice best_encoder callback With legacy helpers all the routing was already set up when calling best_encoder and so could be inspected. But with atomic it's staged, hence we need a new atomic compliant callback for drivers which need to inspect the requested state and can't just decided the best encoder statically. This is needed to fix up i915 dp mst where we need to pick the right encoder depending upon the requested CRTC for the connector. 
v2: Don't forget to amend the kerneldoc Cc: Chris Wilson Cc: Linus Torvalds Cc: Theodore Ts'o Acked-by: Thierry Reding Reviewed-by: Ander Conselvan de Oliveira Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index aac2122..8694ca9 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } funcs = connector->helper_private; - new_encoder = funcs->best_encoder(connector); + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, + connector_state); + else + new_encoder = funcs->best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index c8fc187..918aa68 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs { * @get_modes: get mode list for this connector * @mode_valid: is this mode valid on the given connector? (optional) * @best_encoder: return the preferred encoder for this connector + * @atomic_best_encoder: atomic version of @best_encoder * * The helper operations are called by the mid-layer CRTC helper. */ @@ -176,6 +177,8 @@ struct drm_connector_helper_funcs { enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); }; extern void drm_helper_disable_unused_functions(struct drm_device *dev); -- cgit v0.10.2 From 459485ad3513bce12a3773f801e4647445062d9e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 3 Aug 2015 17:24:09 +0200 Subject: drm/i915: Fixup dp mst encoder selection In commit 8c7b5ccb729870e606321b3703e2c2e698c49a95 Author: Ander Conselvan de Oliveira Date: Tue Apr 21 17:13:19 2015 +0300 drm/i915: Use atomic helpers for computing changed flags we've switched over to the atomic version to compute the crtc->encoder->connector routing from the i915 variant. That one relies upon the ->best_encoder callback, but the i915-private version relied upon intel_find_encoder. Which didn't matter except for dp mst, where the encoder depends upon the selected crtc. Fix this functional bug by implemented a correct atomic-state based encoder selector for dp mst. Note that we can't get rid of the legacy best_encoder callback since the fbdev emulation uses that still. That means it's incorrect there still, but that's been the case ever since i915 dp mst support was merged so not a regression. Best to fix that by converting fbdev over to atomic too. 
Cc: Chris Wilson Cc: Linus Torvalds Cc: Theodore Ts'o Reviewed-by: Ander Conselvan de Oliveira Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 6e4cc53..600afdb 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, return MODE_OK; } +static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct intel_crtc *crtc = to_intel_crtc(state->crtc); + + return &intel_dp->mst_encoders[crtc->pipe]->base.base; +} + static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, + .atomic_best_encoder = intel_mst_atomic_best_encoder, .best_encoder = intel_mst_best_encoder, }; -- cgit v0.10.2 From 42639ba554655c280ae6cb72df0522b1201f2961 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 3 Aug 2015 17:24:10 +0200 Subject: drm/dp-mst: Remove debug WARN_ON Apparently been in there since forever and fairly easy to hit when hotplugging really fast. I can do that since my mst hub has a manual button to flick the hpd line for reprobing. The resulting WARNING spam isn't pretty. Cc: Dave Airlie Cc: stable@vger.kernel.org Reviewed-by: Thierry Reding Reviewed-by: Ander Conselvan de Oliveira Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 778bbb6..b0487c9 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1294,7 +1294,6 @@ retry: goto retry; } DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); - WARN(1, "fail\n"); return -EIO; } -- cgit v0.10.2 From 6ea76f3cade4734e73e3da842005820558b8b828 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 3 Aug 2015 17:24:11 +0200 Subject: drm/atomic-helpers: Make encoder picking more robust We've had a few issues with atomic where subtle bugs in the encoder picking logic lead to accidental self-stealing of the encoder, resulting in a NULL connector_state->crtc in update_connector_routing and subsequent. Linus applied some duct-tape for an mst regression in commit 27667f4744fc5a0f3e50910e78740bac5670d18b Author: Linus Torvalds Date: Wed Jul 29 22:18:16 2015 -0700 i915: temporary fix for DP MST docking station NULL pointer dereference But that was incomplete (the code will still oops when debuggin is enabled) and mangled the state even further. So instead WARN and bail out as the more future-proof option. 
Cc: Theodore Ts'o Cc: Linus Torvalds Reviewed-by: Thierry Reding Reviewed-by: Ander Conselvan de Oliveira Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 8694ca9..9dcc728 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -234,13 +234,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } } + if (WARN_ON(!connector_state->crtc)) + return -EINVAL; + connector_state->best_encoder = new_encoder; - if (connector_state->crtc) { - idx = drm_crtc_index(connector_state->crtc); + idx = drm_crtc_index(connector_state->crtc); - crtc_state = state->crtc_states[idx]; - crtc_state->mode_changed = true; - } + crtc_state = state->crtc_states[idx]; + crtc_state->mode_changed = true; DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", connector->base.id, -- cgit v0.10.2 From 0621809e37936e7c2b3eac9165cf2aad7f9189eb Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 3 Aug 2015 14:57:30 +0900 Subject: HID: hid-input: Fix accessing freed memory during device disconnect During unbinding the driver was dereferencing a pointer to memory already freed by power_supply_unregister(). Driver was freeing its internal description of battery through pointers stored in power_supply structure. However, because the core owns the power supply instance, after calling power_supply_unregister() this memory is freed and the driver cannot access these members. Fix this by storing the pointer to internal description of battery in a local variable before calling power_supply_unregister(), so the pointer remains valid. Signed-off-by: Krzysztof Kozlowski Reported-by: H.J. Lu Fixes: 297d716f6260 ("power_supply: Change ownership from driver to core") Cc: Reviewed-by: Dmitry Torokhov Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 3511bbab..e3c6364 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -462,12 +462,15 @@ out: static void hidinput_cleanup_battery(struct hid_device *dev) { + const struct power_supply_desc *psy_desc; + if (!dev->battery) return; + psy_desc = dev->battery->desc; power_supply_unregister(dev->battery); - kfree(dev->battery->desc->name); - kfree(dev->battery->desc); + kfree(psy_desc->name); + kfree(psy_desc); dev->battery = NULL; } #else /* !CONFIG_HID_BATTERY_STRENGTH */ -- cgit v0.10.2 From fcdf31a7c162de0c93a2bee51df4688ab0a348f8 Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Fri, 31 Jul 2015 14:30:42 +0100 Subject: xen/events/fifo: Handle linked events when closing a port An event channel bound to a CPU that was offlined may still be linked on that CPU's queue. If this event channel is closed and reused, subsequent events will be lost because the event channel is never unlinked and thus cannot be linked onto the correct queue. When a channel is closed and the event is still linked into a queue, ensure that it is unlinked before completing. If the CPU to which the event channel bound is online, spin until the event is handled by that CPU. If that CPU is offline, it can't handle the event, so clear the event queue during the close, dropping the events. This fixes the missing interrupts (and subsequent disk stalls etc.) when offlining a CPU. 
Signed-off-by: Ross Lagerwall Cc: Signed-off-by: David Vrabel diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 96093ae..1495ecc 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq) irq_free_desc(irq); } -static void xen_evtchn_close(unsigned int port) +static void xen_evtchn_close(unsigned int port, unsigned int cpu) { struct evtchn_close close; + xen_evtchn_op_close(port, cpu); + close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); @@ -544,7 +546,7 @@ out: err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); - xen_evtchn_close(evtchn); + xen_evtchn_close(evtchn, NR_CPUS); return 0; } @@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data) return; mask_evtchn(evtchn); - xen_evtchn_close(evtchn); + xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn)); xen_irq_info_cleanup(info); } @@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq) if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); - xen_evtchn_close(evtchn); + xen_evtchn_close(evtchn, cpu); switch (type_from_irq(irq)) { case IRQT_VIRQ: diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index ed673e1..6df8aac 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port) } } +static bool evtchn_fifo_is_linked(unsigned port) +{ + event_word_t *word = event_word_from_port(port); + return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word)); +} + static uint32_t clear_linked(volatile event_word_t *word) { event_word_t new, old, w; @@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port) static void consume_one_event(unsigned cpu, struct evtchn_fifo_control_block *control_block, - unsigned priority, unsigned long *ready) + unsigned priority, unsigned long *ready, + bool drop) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu, if (head == 0) clear_bit(priority, ready); - if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) - handle_irq_for_port(port); + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { + if (likely(!drop)) + handle_irq_for_port(port); + } q->head[priority] = head; } -static void evtchn_fifo_handle_events(unsigned cpu) +static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) { struct evtchn_fifo_control_block *control_block; unsigned long ready; @@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu) while (ready) { q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); - consume_one_event(cpu, control_block, q, &ready); + consume_one_event(cpu, control_block, q, &ready, drop); ready |= xchg(&control_block->ready, 0); } } +static void evtchn_fifo_handle_events(unsigned cpu) +{ + __evtchn_fifo_handle_events(cpu, false); +} + static void evtchn_fifo_resume(void) { unsigned cpu; @@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void) event_array_pages = 0; } +static void evtchn_fifo_close(unsigned port, unsigned int cpu) +{ + if (cpu == NR_CPUS) + return; + + get_online_cpus(); + if (cpu_online(cpu)) { + if (WARN_ON(irqs_disabled())) + goto out; + + while (evtchn_fifo_is_linked(port)) + cpu_relax(); + } else { + __evtchn_fifo_handle_events(cpu, true); + } + +out: + put_online_cpus(); +} + static const struct evtchn_ops evtchn_ops_fifo = { 
.max_channels = evtchn_fifo_max_channels, .nr_channels = evtchn_fifo_nr_channels, @@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = { .unmask = evtchn_fifo_unmask, .handle_events = evtchn_fifo_handle_events, .resume = evtchn_fifo_resume, + .close = evtchn_fifo_close, }; static int evtchn_fifo_alloc_control_block(unsigned cpu) diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 50c2050a..d18e123 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -68,6 +68,7 @@ struct evtchn_ops { bool (*test_and_set_mask)(unsigned port); void (*mask)(unsigned port); void (*unmask)(unsigned port); + void (*close)(unsigned port, unsigned cpu); void (*handle_events)(unsigned cpu); void (*resume)(void); @@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void) evtchn_ops->resume(); } +static inline void xen_evtchn_op_close(unsigned port, unsigned cpu) +{ + if (evtchn_ops->close) + return evtchn_ops->close(port, cpu); +} + void xen_evtchn_2l_init(void); int xen_evtchn_fifo_init(void); -- cgit v0.10.2 From 257d5d9a9f19ee6c6801589c74791d5422c374e9 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Fri, 17 Jul 2015 16:47:23 +0300 Subject: ARM: dts: dra7: Add syscon-pllreset syscon to SATA PHY This register is required to be passed to the SATA PHY driver to workaround errata i783 (SATA Lockup After SATA DPLL Unlock/Relock). Signed-off-by: Roger Quadros Acked-by: Tony Lindgren Signed-off-by: Kishon Vijay Abraham I diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 8f1e25b..4a0718c 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1140,6 +1140,7 @@ ctrl-module = <&omap_control_sata>; clocks = <&sys_clkin1>, <&sata_ref_clk>; clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; #phy-cells = <0>; }; -- cgit v0.10.2 From 5406898354ebfb11f49b955fb5e49a62786a542f Mon Sep 17 00:00:00 2001 From: Mengdong Lin Date: Tue, 4 Aug 2015 15:47:35 +0100 Subject: ASoC: topology: fix typo in soc_tplg_kcontrol_bind_io() Signed-off-by: Mengdong Lin Signed-off-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index d096068..6a547c6 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -534,7 +534,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, k->put = bops[i].put; if (k->get == NULL && bops[i].id == hdr->ops.get) k->get = bops[i].get; - if (k->info == NULL && ops[i].id == hdr->ops.info) + if (k->info == NULL && bops[i].id == hdr->ops.info) k->info = bops[i].info; } -- cgit v0.10.2 From f202a666e933f3c7557126d63833a6a3b577ac15 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Tue, 16 Jun 2015 21:06:24 +0200 Subject: batman-adv: avoid DAT to mess up LAN state When a node running DAT receives an ARP request from the LAN for the first time, it is likely that this node will request the ARP entry through the distributed ARP table (DAT) in the mesh. Once a DAT reply is received the asking node must check if the MAC address for which the IP address has been asked is local. If it is, the node must drop the ARP reply bceause the client should have replied on its own locally. Forwarding this reply means fooling any L2 bridge (e.g. Ethernet switches) lying between the batman-adv node and the LAN. This happens because the L2 bridge will think that the client sending the ARP reply lies somewhere in the mesh, while this node is sitting in the same LAN. 
Reported-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index fb54e6a..6d0b471 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @skb: packet to check * @hdr_size: size of the encapsulation header + * + * Returns true if the packet was snooped and consumed by DAT. False if the + * packet has to be delivered to the interface */ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, struct sk_buff *skb, int hdr_size) @@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, uint16_t type; __be32 ip_src, ip_dst; uint8_t *hw_src, *hw_dst; - bool ret = false; + bool dropped = false; unsigned short vid; if (!atomic_read(&bat_priv->distributed_arp_table)) @@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, /* if this REPLY is directed to a client of mine, let's deliver the * packet to the interface */ - ret = !batadv_is_my_client(bat_priv, hw_dst, vid); + dropped = !batadv_is_my_client(bat_priv, hw_dst, vid); + + /* if this REPLY is sent on behalf of a client of mine, let's drop the + * packet because the client will reply by itself + */ + dropped |= batadv_is_my_client(bat_priv, hw_src, vid); out: - if (ret) + if (dropped) kfree_skb(skb); - /* if ret == false -> packet has to be delivered to the interface */ - return ret; + /* if dropped == false -> deliver to the interface */ + return dropped; } /** -- cgit v0.10.2 From 354136bcc3c4f40a2813bba8f57ca5267d812d15 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 9 Jun 2015 21:24:36 +0800 Subject: batman-adv: fix kernel crash due to missing NULL checks batadv_softif_vlan_get() may return NULL which has to be verified by the caller. 
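As a rough sketch of the resulting calling convention (plain userspace C, with a hypothetical vlan_put() standing in for batadv_softif_vlan_free_ref()): the reference-dropping helper becomes a no-op for NULL, so callers may pass an unchecked lookup result without crashing:

#include <stdio.h>
#include <stdlib.h>

struct vlan {
        int refcount;
};

/* Tolerate NULL, like kfree() does, so callers need no extra check. */
static void vlan_put(struct vlan *v)
{
        if (!v)
                return;
        if (--v->refcount == 0) {
                printf("freeing vlan\n");
                free(v);
        }
}

int main(void)
{
        struct vlan *v = malloc(sizeof(*v));

        if (!v)
                return 1;
        v->refcount = 1;

        vlan_put(NULL); /* harmless no-op */
        vlan_put(v);    /* drops the last reference and frees */
        return 0;
}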
Fixes: 35df3b298fc8 ("batman-adv: fix TT VLAN inconsistency on VLAN re-add") Reported-by: Ryan Thompson Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index c002961..a2fc843 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -479,6 +479,9 @@ out: */ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) { + if (!vlan) + return; + if (atomic_dec_and_test(&vlan->refcount)) { spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); hlist_del_rcu(&vlan->list); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b482495..38b83c5 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -594,6 +594,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", + addr, BATADV_PRINT_VID(vid))) + goto out; batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", @@ -1066,6 +1069,9 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (!vlan) + goto out; + batadv_softif_vlan_free_ref(vlan); batadv_softif_vlan_free_ref(vlan); @@ -1166,8 +1172,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common_entry->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } @@ -3207,8 +3215,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } -- cgit v0.10.2 From ef72706a0543d0c3a5ab29bd6378fdfb368118d9 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Wed, 17 Jun 2015 20:01:36 +0800 Subject: batman-adv: protect tt_local_entry from concurrent delete events The tt_local_entry deletion performed in batadv_tt_local_remove() was neither protecting against simultaneous deletes nor checking whether the element was still part of the list before calling hlist_del_rcu(). Replacing the hlist_del_rcu() call with batadv_hash_remove() provides adequate protection via hash spinlocks as well as an is-element-still-in-hash check to avoid 'blind' hash removal. 
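As an aside, a minimal userspace sketch of why a locked remove-if-still-present primitive closes this class of race (a pthread mutex stands in for the hash spinlocks and a plain singly linked list for the TT hash; none of this is the batman-adv code itself). With two threads racing to delete the same entry, exactly one of them observes the element and unlinks it, whereas a blind unlink would run twice:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *next;
        int key;
};

static struct node n2 = { NULL, 2 };
static struct node n1 = { &n2, 1 };
static struct node *head = &n1;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove under the lock, and only if the element is still linked. */
static bool list_remove(struct node *victim)
{
        struct node **pp;
        bool removed = false;

        pthread_mutex_lock(&lock);
        for (pp = &head; *pp; pp = &(*pp)->next) {
                if (*pp == victim) {
                        *pp = victim->next;
                        removed = true;
                        break;
                }
        }
        pthread_mutex_unlock(&lock);
        return removed;
}

static void *deleter(void *arg)
{
        printf("thread %ld removed: %d\n", (long)arg, list_remove(&n2));
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, deleter, (void *)1L);
        pthread_create(&b, NULL, deleter, (void *)2L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0; /* exactly one thread reports removed: 1 */
}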
Fixes: 068ee6e204e1 ("batman-adv: roaming handling mechanism redesign") Reported-by: alfonsname@web.de Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 38b83c5..5e953297 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1037,6 +1037,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, struct batadv_tt_local_entry *tt_local_entry; uint16_t flags, curr_flags = BATADV_NO_FLAGS; struct batadv_softif_vlan *vlan; + void *tt_entry_exists; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) @@ -1064,7 +1065,15 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, * immediately purge it */ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); - hlist_del_rcu(&tt_local_entry->common.hash_entry); + + tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, + batadv_compare_tt, + batadv_choose_tt, + &tt_local_entry->common); + if (!tt_entry_exists) + goto out; + + /* extra call to free the local tt entry */ batadv_tt_local_entry_free_ref(tt_local_entry); /* decrease the reference held for this vlan */ -- cgit v0.10.2 From 27a4d5efd417b6ef3190e9af357715532d4617a3 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 24 Jun 2015 14:50:19 +0200 Subject: batman-adv: initialize up/down values when adding a gateway Without this initialization, gateways which actually announce up/down bandwidth of 0/0 could be added. If these nodes get purged via _batadv_purge_orig() later, the gw_node structure does not get removed since batadv_gw_node_delete() updates the gw_node with up/down bandwidth of 0/0, and the updating function then discards the change and does not free gw_node. This results in leaking the gw_node structures, which references other structures: gw_node -> orig_node -> orig_node_ifinfo -> hardif. When removing the interface later, the open reference on the hardif may cause hangs with the infamous "unregister_netdevice: waiting for mesh1 to become free. Usage count = 1" message. Signed-off-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index bb015862..cffa92d 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; + gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); + gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); -- cgit v0.10.2 From aa65fa35ba6b589a12a6025739c2d935dd743b5a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 4 Aug 2015 23:23:50 -0400 Subject: may_follow_link() should use nd->inode Now that we can get there in RCU mode, we shouldn't play with nd->path.dentry->d_inode - it's not guaranteed to be stable. Use nd->inode instead. Reported-by: Hugh Dickins Signed-off-by: Al Viro diff --git a/fs/namei.c b/fs/namei.c index fbbcf09..1c2105e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd) return 0; /* Allowed if parent directory not sticky and world-writable. 
*/ - parent = nd->path.dentry->d_inode; + parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; -- cgit v0.10.2 From fb1de5a4c825a389f054cc3803e06116d2fbdc7e Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 1 Aug 2015 07:01:24 -0700 Subject: staging: lustre: Include unaligned.h instead of access_ok.h Including access_ok.h causes the ia64:allmodconfig build (and maybe others) to fail with include/linux/unaligned/le_struct.h:6:19: error: redefinition of 'get_unaligned_le16' include/linux/unaligned/access_ok.h:7:19: note: previous definition of 'get_unaligned_le16' was here include/linux/unaligned/le_struct.h:26:20: error: redefinition of 'put_unaligned_le32' include/linux/unaligned/access_ok.h:42:20: note: previous definition of 'put_unaligned_le32' was here include/linux/unaligned/le_struct.h:31:20: error: redefinition of 'put_unaligned_le64' include/linux/unaligned/access_ok.h:47:20: note: previous definition of 'put_unaligned_le64' was here Include unaligned.h instead and leave it up to the architecture to decide how to implement unaligned accesses. Fixes: 8c4f136497315 ("Staging: lustre: Use put_unaligned_le64") Cc: Vaishali Thakkar Signed-off-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c index 9c934e6..c61add4 100644 --- a/drivers/staging/lustre/lustre/obdclass/debug.c +++ b/drivers/staging/lustre/lustre/obdclass/debug.c @@ -40,7 +40,7 @@ #define DEBUG_SUBSYSTEM D_OTHER -#include +#include #include "../include/obd_support.h" #include "../include/lustre_debug.h" -- cgit v0.10.2 From c85523d1d97cc86aadc388221aa83ae9bc1e7cca Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Wed, 5 Aug 2015 09:21:04 +0900 Subject: Revert "ALSA: fireworks: add support for AudioFire2 quirk" This reverts commit 9c6893e0be38b6ca9a56a854226e51dee0a16a5a. The fix is superseded by the next commit as a better implementation for supporting AudioFire2/AudioFire4/AudioFirePre8 quirks. Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index c670db4..2682e7e 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -248,8 +248,6 @@ efw_probe(struct fw_unit *unit, err = get_hardware_info(efw); if (err < 0) goto error; - if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2) - efw->is_af2 = true; if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) efw->is_af9 = true; diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index c33252b..4f0201a 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -70,7 +70,6 @@ struct snd_efw { bool resp_addr_changable; /* for quirks */ - bool is_af2; bool is_af9; u32 firmware_version; diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index a0762dd..c55db1b 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -172,9 +172,6 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw) efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; /* Fireworks reset dbc at bus reset. */ efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; - /* AudioFire2 starts packets with non-zero dbc. */ - if (efw->is_af2) - efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK; /* AudioFire9 always reports wrong dbs. 
*/ if (efw->is_af9) efw->tx_stream.flags |= CIP_WRONG_DBS; -- cgit v0.10.2 From 18f5ed365d3f188a91149d528c853000330a4a58 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Wed, 5 Aug 2015 09:21:05 +0900 Subject: ALSA: fireworks/firewire-lib: add support for recent firmware quirk Fireworks uses TSB43CB43(IceLynx-Micro) as its IEC 61883-1/6 interface. This chip includes ARM7 core, and loads and runs program. The firmware is stored in on-board memory and loaded every powering-on from it. Echo Audio ships several versions of firmwares for each model. These firmwares have each quirk and the quirk changes a sequence of packets. As long as I investigated, AudioFire2/AudioFire4/AudioFirePre8 have a quirk to transfer a first packet with 0x02 in its dbc field. This causes ALSA Fireworks driver to detect discontinuity. In this case, firmware version 5.7.0, 5.7.3 and 5.8.0 are used. Payload CIP CIP quadlets header1 header2 02 00050002 90ffffff <- 42 0005000a 90013000 42 00050012 90014400 42 0005001a 90015800 02 0005001a 90ffffff 42 00050022 90019000 42 0005002a 9001a400 42 00050032 9001b800 02 00050032 90ffffff 42 0005003a 9001d000 42 00050042 9001e400 42 0005004a 9001f800 02 0005004a 90ffffff (AudioFire2 with firmware version 5.7.) $ dmesg snd-fireworks fw1.0: Detect discontinuity of CIP: 00 02 These models, AudioFire8 (since Jul 2009 ) and Gibson Robot Interface Pack series uses the same ARM binary as their firmware. Thus, this quirk may be observed among them. This commit adds a new member for AMDTP structure. This member represents the value of dbc field in a first AMDTP packet. Drivers can set it with a preferred value according to model's quirk. Tested-by: Johannes Oertei Signed-off-by: Takashi Sakamoto Cc: Signed-off-by: Takashi Iwai diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index 7bb988f..2a153d2 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c @@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s, s->data_block_counter != UINT_MAX) data_block_counter = s->data_block_counter; - if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || - (s->data_block_counter == UINT_MAX)) { + if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && + data_block_counter == s->tx_first_dbc) || + s->data_block_counter == UINT_MAX) { lost = false; } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { lost = data_block_counter != s->data_block_counter; diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h index 26b9093..b2cf9e7 100644 --- a/sound/firewire/amdtp.h +++ b/sound/firewire/amdtp.h @@ -157,6 +157,8 @@ struct amdtp_stream { /* quirk: fixed interval of dbc between previos/current packets. */ unsigned int tx_dbc_interval; + /* quirk: indicate the value of dbc field in a first packet. */ + unsigned int tx_first_dbc; bool callbacked; wait_queue_head_t callback_wait; diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 2682e7e..c94a432 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit, err = get_hardware_info(efw); if (err < 0) goto error; + /* AudioFire8 (since 2009) and AudioFirePre8 */ if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) efw->is_af9 = true; + /* These models uses the same firmware. 
*/ + if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 || + entry->model_id == MODEL_ECHO_AUDIOFIRE_4 || + entry->model_id == MODEL_ECHO_AUDIOFIRE_9 || + entry->model_id == MODEL_GIBSON_RIP || + entry->model_id == MODEL_GIBSON_GOLDTOP) + efw->is_fireworks3 = true; snd_efw_proc_init(efw); diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index 4f0201a..084d414 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -71,6 +71,7 @@ struct snd_efw { /* for quirks */ bool is_af9; + bool is_fireworks3; u32 firmware_version; unsigned int midi_in_ports; diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index c55db1b..7e353f1 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw) efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; /* Fireworks reset dbc at bus reset. */ efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; + /* + * But Recent firmwares starts packets with non-zero dbc. + * Driver version 5.7.6 installs firmware version 5.7.3. + */ + if (efw->is_fireworks3 && + (efw->firmware_version == 0x5070000 || + efw->firmware_version == 0x5070300 || + efw->firmware_version == 0x5080000)) + efw->tx_stream.tx_first_dbc = 0x02; /* AudioFire9 always reports wrong dbs. */ if (efw->is_af9) efw->tx_stream.flags |= CIP_WRONG_DBS; -- cgit v0.10.2 From 87ce62802f7a3553234ebf1ae7cd52c8bf272fb9 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 5 Aug 2015 11:12:00 +0530 Subject: ARC: Make pt_regs regs unsigned KGDB fails to build after f51e2f191112 ("ARC: make sure instruction_pointer() returns unsigned value") The hack to force one specific reg to unsigned backfired. There's no reason to keep the regs signed after all. 
| CC arch/arc/kernel/kgdb.o |../arch/arc/kernel/kgdb.c: In function 'kgdb_trap': | ../arch/arc/kernel/kgdb.c:180:29: error: lvalue required as left operand of assignment | instruction_pointer(regs) -= BREAK_INSTR_SIZE; Reported-by: Yuriy Kolerov Fixes: f51e2f191112 ("ARC: make sure instruction_pointer() returns unsigned value") Cc: Alexey Brodkin Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 91694ec..69095da 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -20,20 +20,20 @@ struct pt_regs { /* Real registers */ - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long lp_start, lp_end, lp_count; + unsigned long lp_start, lp_end, lp_count; - long status32; /* status32_l1, status32_l2, erstatus */ - long ret; /* ilink1, ilink2 or eret */ - long blink; - long fp; - long r26; /* gp */ + unsigned long status32; /* status32_l1, status32_l2, erstatus */ + unsigned long ret; /* ilink1, ilink2 or eret */ + unsigned long blink; + unsigned long fp; + unsigned long r26; /* gp */ - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; /* user/kernel sp depending on where we came from */ - long orig_r0; + unsigned long sp; /* User/Kernel depending on where we came from */ + unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq @@ -55,13 +55,13 @@ struct pt_regs { unsigned long event; }; - long user_r25; + unsigned long user_r25; }; #else struct pt_regs { - long orig_r0; + unsigned long orig_r0; union { struct { @@ -76,26 +76,26 @@ struct pt_regs { unsigned long event; }; - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long user_r25; + unsigned long user_r25; - long r26; /* gp */ - long fp; - long sp; /* user/kernel sp depending on where we came from */ + unsigned long r26; /* gp */ + unsigned long fp; + unsigned long sp; /* user/kernel sp depending on where we came from */ - long r12; + unsigned long r12; /*------- Below list auto saved by h/w -----------*/ - long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; + unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; - long blink; - long lp_end, lp_start, lp_count; + unsigned long blink; + unsigned long lp_end, lp_start, lp_count; - long ei, ldi, jli; + unsigned long ei, ldi, jli; - long ret; - long status32; + unsigned long ret; + unsigned long status32; }; #endif @@ -103,10 +103,10 @@ struct pt_regs { /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { - long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; -#define instruction_pointer(regs) (unsigned long)((regs)->ret) +#define instruction_pointer(regs) ((regs)->ret) #define profile_pc(regs) instruction_pointer(regs) /* return 1 if user mode or 0 if kernel mode */ @@ -142,7 +142,7 @@ struct callee_regs { static inline long regs_return_value(struct pt_regs *regs) { - return regs->r0; + return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 76a7739..0b3ef63 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -32,20 +32,20 @@ */ struct user_regs_struct { - long pad; + unsigned long pad; struct { - long bta, lp_start, lp_end, lp_count; - long 
status32, ret, blink, fp, gp; - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; + unsigned long bta, lp_start, lp_end, lp_count; + unsigned long status32, ret, blink, fp, gp; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long sp; } scratch; - long pad2; + unsigned long pad2; struct { - long r25, r24, r23, r22, r21, r20; - long r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20; + unsigned long r19, r18, r17, r16, r15, r14, r13; } callee; - long efa; /* break pt addr, for break points in delay slots */ - long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ + unsigned long efa; /* break pt addr, for break points in delay slots */ + unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; #endif /* !__ASSEMBLY__ */ -- cgit v0.10.2 From ecf5fc6e9654cd7a268c782a523f072b2f1959f9 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 4 Aug 2015 14:36:58 -0700 Subject: mm, vmscan: Do not wait for page writeback for GFP_NOFS allocations Nikolay has reported a hang when a memcg reclaim got stuck with the following backtrace: PID: 18308 TASK: ffff883d7c9b0a30 CPU: 1 COMMAND: "rsync" #0 __schedule at ffffffff815ab152 #1 schedule at ffffffff815ab76e #2 schedule_timeout at ffffffff815ae5e5 #3 io_schedule_timeout at ffffffff815aad6a #4 bit_wait_io at ffffffff815abfc6 #5 __wait_on_bit at ffffffff815abda5 #6 wait_on_page_bit at ffffffff8111fd4f #7 shrink_page_list at ffffffff81135445 #8 shrink_inactive_list at ffffffff81135845 #9 shrink_lruvec at ffffffff81135ead #10 shrink_zone at ffffffff811360c3 #11 shrink_zones at ffffffff81136eff #12 do_try_to_free_pages at ffffffff8113712f #13 try_to_free_mem_cgroup_pages at ffffffff811372be #14 try_charge at ffffffff81189423 #15 mem_cgroup_try_charge at ffffffff8118c6f5 #16 __add_to_page_cache_locked at ffffffff8112137d #17 add_to_page_cache_lru at ffffffff81121618 #18 pagecache_get_page at ffffffff8112170b #19 grow_dev_page at ffffffff811c8297 #20 __getblk_slow at ffffffff811c91d6 #21 __getblk_gfp at ffffffff811c92c1 #22 ext4_ext_grow_indepth at ffffffff8124565c #23 ext4_ext_create_new_leaf at ffffffff81246ca8 #24 ext4_ext_insert_extent at ffffffff81246f09 #25 ext4_ext_map_blocks at ffffffff8124a848 #26 ext4_map_blocks at ffffffff8121a5b7 #27 mpage_map_one_extent at ffffffff8121b1fa #28 mpage_map_and_submit_extent at ffffffff8121f07b #29 ext4_writepages at ffffffff8121f6d5 #30 do_writepages at ffffffff8112c490 #31 __filemap_fdatawrite_range at ffffffff81120199 #32 filemap_flush at ffffffff8112041c #33 ext4_alloc_da_blocks at ffffffff81219da1 #34 ext4_rename at ffffffff81229b91 #35 ext4_rename2 at ffffffff81229e32 #36 vfs_rename at ffffffff811a08a5 #37 SYSC_renameat2 at ffffffff811a3ffc #38 sys_renameat2 at ffffffff811a408e #39 sys_rename at ffffffff8119e51e #40 system_call_fastpath at ffffffff815afa89 Dave Chinner has properly pointed out that this is a deadlock in the reclaim code because ext4 doesn't submit pages which are marked by PG_writeback right away. The heuristic was introduced by commit e62e384e9da8 ("memcg: prevent OOM with too many dirty pages") and it was applied only when may_enter_fs was specified. The code has been changed by c3b94f44fcb0 ("memcg: further prevent OOM with too many dirty pages") which has removed the __GFP_FS restriction with a reasoning that we do not get into the fs code. But this is not sufficient apparently because the fs doesn't necessarily submit pages marked PG_writeback for IO right away. 
ext4_bio_write_page calls io_submit_add_bh but that doesn't necessarily submit the bio. Instead it tries to map more pages into the bio and mpage_map_one_extent might trigger memcg charge which might end up waiting on a page which is marked PG_writeback but hasn't been submitted yet so we would end up waiting for something that never finishes. Fix this issue by replacing __GFP_IO by may_enter_fs check (for case 2) before we go to wait on the writeback. The page fault path, which is the only path that triggers memcg oom killer since 3.12, shouldn't require GFP_NOFS and so we shouldn't reintroduce the premature OOM killer issue which was originally addressed by the heuristic. As per David Chinner the xfs is doing similar thing since 2.6.15 already so ext4 is not the only affected filesystem. Moreover he notes: : For example: IO completion might require unwritten extent conversion : which executes filesystem transactions and GFP_NOFS allocations. The : writeback flag on the pages can not be cleared until unwritten : extent conversion completes. Hence memory reclaim cannot wait on : page writeback to complete in GFP_NOFS context because it is not : safe to do so, memcg reclaim or otherwise. Cc: stable@vger.kernel.org # 3.9+ [tytso@mit.edu: corrected the control flow] Fixes: c3b94f44fcb0 ("memcg: further prevent OOM with too many dirty pages") Reported-by: Nikolay Borisov Signed-off-by: Michal Hocko Signed-off-by: Hugh Dickins Signed-off-by: Linus Torvalds diff --git a/mm/vmscan.c b/mm/vmscan.c index e61445d..8286938 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list, * caller can stall after page list has been processed. * * 2) Global or new memcg reclaim encounters a page that is - * not marked for immediate reclaim or the caller does not - * have __GFP_IO. In this case mark the page for immediate + * not marked for immediate reclaim, or the caller does not + * have __GFP_FS (or __GFP_IO if it's simply going to swap, + * not to fs). In this case mark the page for immediate * reclaim and continue scanning. * - * __GFP_IO is checked because a loop driver thread might + * Require may_enter_fs because we would wait on fs, which + * may not have submitted IO yet. And the loop driver might * enter reclaim, and deadlock if it waits on a page for * which it is needed to do the write (loop masks off * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * - * Don't require __GFP_FS, since we're not going into the - * FS, just waiting on its writeback completion. Worryingly, - * ext4 gfs2 and xfs allocate pages with - * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing - * may_enter_fs here is liable to OOM on them. - * * 3) Legacy memcg encounters a page that is not already marked * PageReclaim. memcg does not have any dirty pages * throttling so we could easily OOM just because too many @@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 2 above */ } else if (sane_reclaim(sc) || - !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { + !PageReclaim(page) || !may_enter_fs) { /* * This is slightly racy - end_page_writeback() * might have just cleared PageReclaim, then -- cgit v0.10.2 From f58e5aa7b873b8a4376b816993d4b0e903befcba Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 4 Aug 2015 18:34:00 -0700 Subject: netfilter: conntrack: Use flags in nf_ct_tmpl_alloc() The flags were ignored for this function when it was introduced. 
Also fix the style problem in kzalloc. Fixes: 0838aa7fc (netfilter: fix netns dependencies with conntrack templates) Signed-off-by: Joe Stringer Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f168099..3c20d02 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags) { struct nf_conn *tmpl; - tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL); + tmpl = kzalloc(sizeof(*tmpl), flags); if (tmpl == NULL) return NULL; @@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags) if (zone) { struct nf_conntrack_zone *nf_ct_zone; - nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC); + nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags); if (!nf_ct_zone) goto out_free; nf_ct_zone->id = zone; -- cgit v0.10.2 From 46011e6ea39235e4aca656673c500eac81a07a17 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 3 Aug 2015 17:48:43 -0700 Subject: MIPS: Make set_pte() SMP safe. On MIPS the GLOBAL bit of the PTE must have the same value in any aligned pair of PTEs. These pairs of PTEs are referred to as "buddies". In a SMP system is is possible for two CPUs to be calling set_pte() on adjacent PTEs at the same time. There is a race between setting the PTE and a different CPU setting the GLOBAL bit in its buddy PTE. This race can be observed when multiple CPUs are executing vmap()/vfree() at the same time. Make setting the buddy PTE's GLOBAL bit an atomic operation to close the race condition. The case of CONFIG_64BIT_PHYS_ADDR && CONFIG_CPU_MIPS32 is *not* handled. Signed-off-by: David Daney Cc: Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10835/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9d81067..ae85694 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ +#ifdef CONFIG_64BIT +#define LL_INSN "lld" +#define SC_INSN "scd" +#else /* CONFIG_32BIT */ +#define LL_INSN "ll" +#define SC_INSN "sc" +#endif + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + " .set push\n" + " .set noreorder\n" + "1: " LL_INSN " %[tmp], %[buddy]\n" + " bnez %[tmp], 2f\n" + " or %[tmp], %[tmp], %[global]\n" + " " SC_INSN " %[tmp], %[buddy]\n" + " beqz %[tmp], 1b\n" + " nop\n" + "2:\n" + " .set pop" + : [buddy] "+m" (buddy->pte), + [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ if (pte_none(*buddy)) pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ } #endif } -- cgit v0.10.2 From 4a6ca1a2c2530af4611024476ba7005bf0336dfe Mon Sep 17 00:00:00 2001 From: Jean-Francois Moine Date: Fri, 17 Jul 2015 13:07:35 +0200 Subject: drm/i2c: tda998x: fix bad checksum of the HDMI AVI infoframe The commit 8c7a075da9f7980cc95ffcd7e6621d4a87f20f40 "drm/i2c: tda998x: use drm_hdmi_avi_infoframe_from_display_mode()" also uses hdmi_avi_infoframe_pack() to create the AVI infoframe. This function sets the checksum of the frame and this breaks the second calculation of the checksum done in tda998x_write_if(). 
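To see why packing the frame twice goes wrong: the HDMI infoframe checksum byte is chosen so that every byte of the frame, checksum included, sums to zero modulo 256. A simplified, self-contained sketch (the real packing lives in hdmi_avi_infoframe_pack() and tda998x_cksum(); the byte layout below is illustrative only) shows that recomputing the checksum over a buffer whose checksum slot is already filled produces a different, wrong value:

#include <stdint.h>
#include <stdio.h>

/* Checksum such that the sum of all frame bytes is 0 modulo 256. */
static uint8_t infoframe_csum(const uint8_t *buf, size_t len)
{
        unsigned int sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                sum += buf[i];
        return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
        uint8_t frame[8] = { 0x82, 0x02, 0x0d, 0x00,  /* header + csum slot */
                             0x12, 0x34, 0x56, 0x78 };/* payload */

        /* First pass: checksum slot is still zero. */
        frame[3] = infoframe_csum(frame, sizeof(frame));
        printf("first checksum:  0x%02x\n", frame[3]); /* 0x5b */

        /* Second pass over the already-checksummed buffer: the slot is
         * no longer zero, so the result degenerates to 0x00. */
        frame[3] = infoframe_csum(frame, sizeof(frame));
        printf("second checksum: 0x%02x\n", frame[3]); /* 0x00 */
        return 0;
}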
Fixes: 8c7a075da9f7980c ("drm/i2c: tda998x: use drm_hdmi_avi_infoframe_from_display_mode()") Signed-off-by: Jean-Francois Moine Signed-off-by: Russell King diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 290a69d..e7f2000 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -582,8 +582,6 @@ static void tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, uint8_t *buf, size_t size) { - buf[PB(0)] = tda998x_cksum(buf, size); - reg_clear(priv, REG_DIP_IF_FLAGS, bit); reg_write_range(priv, addr, buf, size); reg_set(priv, REG_DIP_IF_FLAGS, bit); @@ -603,6 +601,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p) buf[PB(4)] = p->audio_frame[4]; buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ + buf[PB(0)] = tda998x_cksum(buf, sizeof(buf)); + tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, sizeof(buf)); } -- cgit v0.10.2 From fc1a8126bf8095b10f5a79893f2d2b19227f88f2 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 4 Aug 2015 10:58:26 -0600 Subject: KVM: MTRR: Use default type for non-MTRR-covered gfn before WARN_ON The patch was munged on commit to re-order these tests resulting in excessive warnings when trying to do device assignment. Return to original ordering: https://lkml.org/lkml/2015/7/15/769 Fixes: 3e5d2fdceda1 ("KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type") Signed-off-by: Alex Williamson Reviewed-by: Xiao Guangrong Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index dc0a84a..9e8bf13 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) if (iter.mtrr_disabled) return mtrr_disabled_type(); + /* not contained in any MTRRs. */ + if (type == -1) + return mtrr_default_type(mtrr_state); + /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); - /* not contained in any MTRRs. */ - if (type == -1) - return mtrr_default_type(mtrr_state); - return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); -- cgit v0.10.2 From 73851b36fe73819f8c201971e913324d4846a7ea Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Wed, 5 Aug 2015 18:03:34 +0800 Subject: ALSA: hda - one Dell machine needs the headphone white noise fixup The fixup ALC292_FIXUP_DISABLE_AAMIX can fix the white noise of the headphone on this Dell machine. 
Cc: Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c456c04..0b9847a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5189,6 +5189,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), -- cgit v0.10.2 From a4b45b25f18d1e798965efec429ba5fc01b9f0b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Thu, 30 Jul 2015 20:41:57 +0200 Subject: hwmon: (dell-smm) Blacklist Dell Studio XPS 8100 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CPU fan speed going up and down on Dell Studio XPS 8100 for unknown reasons. Without further debugging on the affected machine, it is not possible to find the problem. Link: https://bugzilla.kernel.org/show_bug.cgi?id=100121 Signed-off-by: Pali Rohár Tested-by: Jan C Peters Cc: stable@vger.kernel.org # v4.0+, will need backport [groeck: cleaned up description, comments] Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 37c16af..c848789 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); +static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { + { + /* + * CPU fan speed going up and down on Dell Studio XPS 8100 + * for unknown reasons. + */ + .ident = "Dell Studio XPS 8100", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), + }, + }, + { } +}; + /* * Probe for the presence of a supported laptop. 
*/ @@ -940,7 +955,8 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table) || + dmi_check_system(i8k_blacklist_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; -- cgit v0.10.2 From 1252be9ce0ab4f622b8692b648894d09c0df71ce Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 30 Jul 2015 18:18:39 +0200 Subject: hwmon: (nct7904) Export I2C module alias information The I2C core always reports the MODALIAS uevent as "i2c: Cc: stable@vger.kernel.org # v4.1+ Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 6153df73..08ff89d 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = { {"nct7904", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, nct7904_id); static struct i2c_driver nct7904_driver = { .class = I2C_CLASS_HWMON, -- cgit v0.10.2 From de66b380977eb9daa925aeb21756a9b00f700e45 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 30 Jul 2015 18:18:43 +0200 Subject: hwmon: (g762) Export OF module alias information The I2C core always reports the MODALIAS uevent as "i2c: Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 9b55e67..85d106f 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = { { .compatible = "gmt,g763" }, { }, }; +MODULE_DEVICE_TABLE(of, g762_dt_match); /* * Grab clock (a required property), enable it, get (fixed) clock frequency -- cgit v0.10.2 From e661d0a04462dd98667f8947141bd8defab5b34a Mon Sep 17 00:00:00 2001 From: Marek Belisko Date: Wed, 29 Jul 2015 14:02:19 -0700 Subject: Input: twl4030-vibra - fix ERROR: Bad of_node_put() warning Fix following: [ 8.862274] ERROR: Bad of_node_put() on /ocp/i2c@48070000/twl@48/audio [ 8.869293] CPU: 0 PID: 1003 Comm: modprobe Not tainted 4.2.0-rc2-letux+ #1175 [ 8.876922] Hardware name: Generic OMAP36xx (Flattened Device Tree) [ 8.883514] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 8.891693] [] (show_stack) from [] (dump_stack+0x78/0x94) [ 8.899322] [] (dump_stack) from [] (kobject_release+0x68/0x7c) [ 8.907409] [] (kobject_release) from [] (twl4030_vibra_probe+0x74/0x188 [twl4030_vibra]) [ 8.917877] [] (twl4030_vibra_probe [twl4030_vibra]) from [] (platform_drv_probe+0x48/0x90) [ 8.928497] [] (platform_drv_probe) from [] (really_probe+0xd4/0x238) [ 8.937103] [] (really_probe) from [] (driver_probe_device+0x30/0x48) [ 8.945678] [] (driver_probe_device) from [] (__driver_attach+0x68/0x8c) [ 8.954589] [] (__driver_attach) from [] (bus_for_each_dev+0x50/0x84) [ 8.963226] [] (bus_for_each_dev) from [] (bus_add_driver+0xcc/0x1e4) [ 8.971832] [] (bus_add_driver) from [] (driver_register+0x9c/0xe0) [ 8.980255] [] (driver_register) from [] (do_one_initcall+0x100/0x1b8) [ 8.988983] [] (do_one_initcall) from [] (do_init_module+0x58/0x1c0) [ 8.997497] [] (do_init_module) from [] (SyS_init_module+0x54/0x64) [ 9.005950] [] (SyS_init_module) from [] (ret_fast_syscall+0x0/0x54) [ 9.015838] input: twl4030:vibrator as /devices/platform/68000000.ocp/48070000.i2c/i2c-0/0-0048/48070000.i2c:twl@48:audio/input/input2 node passed to of_find_node_by_name is put inside that function and new node is returned if found. Free returned node not already freed node. 
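Put differently, of_find_node_by_name(from, ...) consumes a reference on the node it starts from and returns a new reference on the match, so the caller owns, and must put, only the returned node. A rough userspace model of that ownership rule, with hypothetical node_put()/find_child() helpers standing in for the OF API:

#include <stdio.h>
#include <string.h>

struct node {
        const char *name;
        struct node *child;
        int refcount;
};

static void node_put(struct node *n)
{
        if (n && --n->refcount == 0)
                printf("released %s\n", n->name);
}

/* Like of_find_node_by_name(): drops the caller's reference on 'from'
 * and hands back a new reference on the match (or NULL). */
static struct node *find_child(struct node *from, const char *name)
{
        struct node *found = NULL;

        if (from->child && strcmp(from->child->name, name) == 0) {
                found = from->child;
                found->refcount++;
        }
        node_put(from);
        return found;
}

int main(void)
{
        struct node codec  = { "codec", NULL,   1 };
        struct node parent = { "audio", &codec, 1 };
        struct node *n;

        n = find_child(&parent, "codec"); /* 'parent' reference is gone now */
        if (n) {
                printf("found %s\n", n->name);
                node_put(n);              /* put the node we were handed */
        }
        return 0;
}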
Signed-off-by: Marek Belisko Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index fc17b95..10c4e3d 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, if (pdata && pdata->coexist) return true; - if (of_find_node_by_name(node, "codec")) { + node = of_find_node_by_name(node, "codec"); + if (node) { of_node_put(node); return true; } -- cgit v0.10.2 From a0e2f50bdb588d91a553f2f6bd56be7bedc94b1a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Jul 2015 22:23:38 -0400 Subject: drm/amdgpu: fix rb setting for CZ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always set num_rbs to 2 for CZ. The 1 RB parts are often harvest configs. The will get sorted out in mesa when we program PA_SC_RASTER_CONFIG[_1]. Acked-by: Christian König Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9e1d4dd..f7538dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1983,6 +1983,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; switch (adev->pdev->revision) { case 0xc4: @@ -1991,7 +1992,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcc: /* B10 */ adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc5: case 0x81: @@ -2000,14 +2000,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcd: /* B8 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc6: case 0xca: case 0xce: /* B6 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc7: case 0x87: @@ -2015,7 +2013,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) default: /* B4 */ adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; break; } -- cgit v0.10.2 From 0fd64291031d3587753b8adc53123b277855c777 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Sat, 1 Aug 2015 21:55:38 +0800 Subject: drm/amdgpu: increment queue when iterating on this variable. gfx_v7_0_print_status contains a for loop on variable queue which does not update this variable between each iteration. This is bug is reported by clang while building allmodconfig LLVMLinux on x86_64: drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:5126:19: error: variable 'queue' used in loop condition not modified in loop body [-Werror,-Wloop-analysis] for (queue = 0; queue < 8; i++) { ^~~~~ Fix this by incrementing variable queue instead of i in this loop. 
Signed-off-by: Nicolas Iooss Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2db6ab0..5c03420 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -5122,7 +5122,7 @@ static void gfx_v7_0_print_status(void *handle) dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL)); - for (queue = 0; queue < 8; i++) { + for (queue = 0; queue < 8; queue++) { cik_srbm_select(adev, me, pipe, queue, 0); dev_info(adev->dev, " queue: %d\n", queue); dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", -- cgit v0.10.2 From 351643d7dd8a48b1053aac5fe3a1aebac614c301 Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Tue, 4 Aug 2015 10:43:50 +0800 Subject: drm/amdgpu: add feature version for RLC and MEC v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expose feature version to user space for RLC/MEC/MEC2 ucode as well v2: fix coding style Signed-off-by: Jammy Zhou Reviewed-by: Christian König diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 31b00f9..8db642b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1130,6 +1130,9 @@ struct amdgpu_gfx { uint32_t me_feature_version; uint32_t ce_feature_version; uint32_t pfp_feature_version; + uint32_t rlc_feature_version; + uint32_t mec_feature_version; + uint32_t mec2_feature_version; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9736892..79eba82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_INFO_FW_GFX_RLC: fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = 0; + fw_info.feature = adev->gfx.rlc_feature_version; break; case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) + if (info->query_fw.index == 0) { fw_info.ver = adev->gfx.mec_fw_version; - else if (info->query_fw.index == 1) + fw_info.feature = adev->gfx.mec_feature_version; + } else if (info->query_fw.index == 1) { fw_info.ver = adev->gfx.mec2_fw_version; - else + fw_info.feature = adev->gfx.mec2_feature_version; + } else return -EINVAL; - fw_info.feature = 0; break; case AMDGPU_INFO_FW_SMC: fw_info.ver = adev->pm.fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 5c03420..0d8bf2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu( + mec_hdr->ucode_feature_version); gfx_v7_0_cp_compute_enable(adev, false); @@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( 
+ mec2_hdr->ucode_feature_version); /* MEC2 */ fw_data = (const __le32 *) @@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu( + hdr->ucode_feature_version); gfx_v7_0_rlc_stop(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f7538dd..0ac38ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2273,6 +2273,8 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu( + hdr->ucode_feature_version); fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -2620,6 +2622,8 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu( + mec_hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->gfx.mec_fw->data + @@ -2639,6 +2643,8 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + mec2_hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->gfx.mec2_fw->data + -- cgit v0.10.2 From cfa2104fbcb87ab0abbdaba608087df1e24fe195 Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Tue, 4 Aug 2015 10:50:47 +0800 Subject: drm/amdgpu: add feature version for SDMA ucode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jammy Zhou Reviewed-by: Christian König diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8db642b..f7b49d5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1642,6 +1642,7 @@ struct amdgpu_sdma { /* SDMA firmware */ const struct firmware *fw; uint32_t fw_version; + uint32_t feature_version; struct amdgpu_ring ring; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 79eba82..3bfe67d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -337,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (info->query_fw.index >= 2) return -EINVAL; fw_info.ver = adev->sdma[info->query_fw.index].fw_version; - fw_info.feature = 0; + fw_info.feature = adev->sdma[info->query_fw.index].feature_version; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ab83cc1..15df46c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size 
= le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index d789588..01bd5c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -542,6 +542,7 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 7bb37b9..cf9bc2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -631,6 +631,7 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + -- cgit v0.10.2 From 595fd013f795daeed0c7ddda02d8e0c51d8ce76c Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Tue, 4 Aug 2015 11:44:19 +0800 Subject: drm/amdgpu: set fw_version and feature_version for smu fw loading MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fw_version and feature_verion should be set correctly when the firmwares are loaded by SMU on Tonga/Carrzio/Iceland Signed-off-by: Jammy Zhou Reviewed-by: Christian König diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0ac38ee..f5a42ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; DRM_DEBUG("\n"); @@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); @@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); @@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device 
*adev) err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); @@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = le32_to_cpu( + cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + cp_hdr->ucode_feature_version); } else { err = 0; adev->gfx.mec2_fw = NULL; @@ -2272,9 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); - adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->gfx.rlc_feature_version = le32_to_cpu( - hdr->ucode_feature_version); fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -2360,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); amdgpu_ucode_print_gfx_hdr(&me_hdr->header); - adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); - adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); - adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); - adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); - adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); - adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); gfx_v8_0_cp_gfx_enable(adev, false); @@ -2621,9 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); - adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); - adev->gfx.mec_feature_version = le32_to_cpu( - mec_hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->gfx.mec_fw->data + @@ -2642,9 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); - adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); - adev->gfx.mec2_feature_version = le32_to_cpu( - mec2_hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->gfx.mec2_fw->data + diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 01bd5c9..a988dfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -541,9 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index cf9bc2e..2b86569 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -630,9 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); -- cgit v0.10.2 From df4198b1e0c4a7d1adde1e5c2ceb67ac10b391bb Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 6 Aug 2015 13:54:21 +0800 Subject: virtio-input: reset device and detach unused during remove Spec requires a device reset during cleanup, so do it and avoid warn in virtio core. And detach unused buffers to avoid memory leak. Signed-off-by: Jason Wang Signed-off-by: Michael S. 
Tsirkin diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 60e2a16..c96944b 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -313,6 +313,7 @@ err_init_vq: static void virtinput_remove(struct virtio_device *vdev) { struct virtio_input *vi = vdev->priv; + void *buf; unsigned long flags; spin_lock_irqsave(&vi->lock, flags); @@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev) spin_unlock_irqrestore(&vi->lock, flags); input_unregister_device(vi->idev); + vdev->config->reset(vdev); + while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL) + kfree(buf); vdev->config->del_vqs(vdev); kfree(vi); } -- cgit v0.10.2 From 6dc6db790a67d28e46abefc44ca1a3bd438b2920 Mon Sep 17 00:00:00 2001 From: "Subhransu S. Prusty" Date: Mon, 29 Jun 2015 17:36:44 +0100 Subject: ASoC: topology: Add subsequence in topology Some widgets may need sorting within, So add this support in topology. Signed-off-by: Subhransu S. Prusty Signed-off-by: Mark Brown diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 1221520..7ae13fb 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -347,6 +347,7 @@ struct snd_soc_tplg_dapm_widget { __le32 reg; /* negative reg = no direct dapm */ __le32 shift; /* bits to shift */ __le32 mask; /* non-shifted mask */ + __le32 subseq; /* sort within widget type */ __u32 invert; /* invert the power bit */ __u32 ignore_suspend; /* kept enabled over suspend */ __u16 event_flags; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 6a547c6..9f2b048 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1351,6 +1351,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg, template.reg = w->reg; template.shift = w->shift; template.mask = w->mask; + template.subseq = w->subseq; template.on_val = w->invert ? 0 : 1; template.off_val = w->invert ? 1 : 0; template.ignore_suspend = w->ignore_suspend; -- cgit v0.10.2 From c3879956957b8de9fd6cbad604e668fd00c6506c Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 29 Jun 2015 17:36:46 +0100 Subject: ASoC: topology: add private data to manifest The topology file manifest should include a private data field. This allows vendors to specify vendor data in the manifest, like timestamps, hashes, additional information for removing platform configuration out of drivers and making these configurable per platform Signed-off-by: Vinod Koul Signed-off-by: Mark Brown diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 7ae13fb..d550c8d 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -238,6 +238,7 @@ struct snd_soc_tplg_manifest { __le32 graph_elems; /* number of graph elements */ __le32 dai_elems; /* number of DAI elements */ __le32 dai_link_elems; /* number of DAI link elements */ + struct snd_soc_tplg_private priv; } __attribute__((packed)); /* -- cgit v0.10.2 From 28a87eebcad40101b1b273cbd4f2a02c104f9367 Mon Sep 17 00:00:00 2001 From: Mengdong Lin Date: Wed, 5 Aug 2015 14:41:13 +0100 Subject: ASoC: topology: Update TLV support so we can support more TLV types Currently the TLV topology structure is targeted at only supporting the DB scale data. This patch extends support for the other TLV types so they can be easily added at a later stage. TLV structure is moved to common topology control header since it's a common field for controls and can be processed in a general way. 
Users must set a proper access flag for a control since it's used to decide if the TLV field is valid and if a TLV callback is needed. Removed the following fields from topology TLV struct: - size/count: type can decide the size. - numid: not needed to initialize TLV for kcontrol. - data: replaced by the type specific struct. Added TLV structure to generic control header and removed TLV structure from mixer control. Signed-off-by: Mengdong Lin Signed-off-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 2819fc1..aa3a79b 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -137,11 +137,19 @@ struct snd_soc_tplg_private { /* * Kcontrol TLV data. */ +struct snd_soc_tplg_tlv_dbscale { + __le32 min; + __le32 step; + __le32 mute; +} __attribute__((packed)); + struct snd_soc_tplg_ctl_tlv { - __le32 size; /* in bytes aligned to 4 */ - __le32 numid; /* control element numeric identification */ - __le32 count; /* number of elem in data array */ - __le32 data[SND_SOC_TPLG_TLV_SIZE]; + __le32 size; /* in bytes of this structure */ + __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */ + union { + __le32 data[SND_SOC_TPLG_TLV_SIZE]; + struct snd_soc_tplg_tlv_dbscale scale; + }; } __attribute__((packed)); /* @@ -172,7 +180,7 @@ struct snd_soc_tplg_ctl_hdr { char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; __le32 access; struct snd_soc_tplg_kcontrol_ops_id ops; - __le32 tlv_size; /* non zero means control has TLV data */ + struct snd_soc_tplg_ctl_tlv tlv; } __attribute__((packed)); /* @@ -260,7 +268,6 @@ struct snd_soc_tplg_mixer_control { __le32 invert; __le32 num_channels; struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; - struct snd_soc_tplg_ctl_tlv tlv; struct snd_soc_tplg_private priv; } __attribute__((packed)); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 2c70f30..31068b8 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -33,6 +33,7 @@ #include #include #include +#include /* * We make several passes over the data (since it wont necessarily be ordered) @@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg, return 0; } + +static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg, + struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale) +{ + unsigned int item_len = 2 * sizeof(unsigned int); + unsigned int *p; + + p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p[0] = SNDRV_CTL_TLVT_DB_SCALE; + p[1] = item_len; + p[2] = scale->min; + p[3] = (scale->step & TLV_DB_SCALE_MASK) + | (scale->mute ? 
TLV_DB_SCALE_MUTE : 0); + + kc->tlv.p = (void *)p; + return 0; +} + static int soc_tplg_create_tlv(struct soc_tplg *tplg, - struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv) + struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc) { - struct snd_ctl_tlv *tlv; - int size; + struct snd_soc_tplg_ctl_tlv *tplg_tlv; - if (tplg_tlv->count == 0) + if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)) return 0; - size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) & - ~(sizeof(unsigned int) - 1)); - tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL); - if (tlv == NULL) - return -ENOMEM; - - dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n", - tplg_tlv->numid, size); + if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { + kc->tlv.c = snd_soc_bytes_tlv_callback; + } else { + tplg_tlv = &tc->tlv; + switch (tplg_tlv->type) { + case SNDRV_CTL_TLVT_DB_SCALE: + return soc_tplg_create_tlv_db_scale(tplg, kc, + &tplg_tlv->scale); - tlv->numid = tplg_tlv->numid; - tlv->length = size; - memcpy(&tlv->tlv[0], tplg_tlv->data, size); - kc->tlv.p = (void *)tlv; + /* TODO: add support for other TLV types */ + default: + dev_dbg(tplg->dev, "Unsupported TLV type %d\n", + tplg_tlv->type); + return -EINVAL; + } + } return 0; } @@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count, } /* create any TLV data */ - soc_tplg_create_tlv(tplg, &kc, &mc->tlv); + soc_tplg_create_tlv(tplg, &kc, &mc->hdr); /* register control here */ err = soc_tplg_add_kcontrol(tplg, &kc, -- cgit v0.10.2 From cb88498b36ab01cbe3a0d95cd097e4afdff4c6fd Mon Sep 17 00:00:00 2001 From: Mengdong Lin Date: Wed, 5 Aug 2015 14:41:14 +0100 Subject: ASoC: topology: Add ops support to byte controls UAPI Add UAPI support for setting byte control ops. Rename the ops structure to be more generic so it can be sued by other objects too. Signed-off-by: Mengdong Lin Signed-off-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index aa3a79b..d5281ac 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -163,9 +163,11 @@ struct snd_soc_tplg_channel { } __attribute__((packed)); /* - * Kcontrol Operations IDs + * Genericl Operations IDs, for binding Kcontrol or Bytes ext ops + * Kcontrol ops need get/put/info. + * Bytes ext ops need get/put. */ -struct snd_soc_tplg_kcontrol_ops_id { +struct snd_soc_tplg_io_ops { __le32 get; __le32 put; __le32 info; @@ -179,7 +181,7 @@ struct snd_soc_tplg_ctl_hdr { __le32 type; char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; __le32 access; - struct snd_soc_tplg_kcontrol_ops_id ops; + struct snd_soc_tplg_io_ops ops; struct snd_soc_tplg_ctl_tlv tlv; } __attribute__((packed)); @@ -311,6 +313,7 @@ struct snd_soc_tplg_bytes_control { __le32 mask; __le32 base; __le32 num_regs; + struct snd_soc_tplg_io_ops ext_ops; struct snd_soc_tplg_private priv; } __attribute__((packed)); -- cgit v0.10.2 From c7bcf8777a539e64dafc7417c00047aee6eb8909 Mon Sep 17 00:00:00 2001 From: Liam Girdwood Date: Wed, 5 Aug 2015 14:41:15 +0100 Subject: ASoC: topology: Add private data type and bump ABI version to 3 Add ID for standalone private data object types and bump ABI version to 3 in order to userpsace features. 
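Bumping SND_SOC_TPLG_ABI_VERSION means a topology loader should refuse blobs built against a newer ABI than it understands. As a rough, illustrative sketch only (the struct below is a simplified stand-in, not the real struct snd_soc_tplg_hdr, and the magic value is made up), such a gate could look like:

#include <stdint.h>
#include <stdio.h>

#define SND_SOC_TPLG_ABI_VERSION 0x3	/* matches the bumped value in this patch */

/* Simplified stand-in for a per-block topology header; illustrative only. */
struct tplg_block_hdr {
	uint32_t magic;
	uint32_t abi;
};

static int tplg_check_abi(const struct tplg_block_hdr *hdr)
{
	if (hdr->abi > SND_SOC_TPLG_ABI_VERSION) {
		fprintf(stderr, "topology ABI 0x%x newer than supported 0x%x\n",
			hdr->abi, SND_SOC_TPLG_ABI_VERSION);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct tplg_block_hdr hdr = { .magic = 0xabcd1234, .abi = 0x3 };

	return tplg_check_abi(&hdr) ? 1 : 0;
}

Older blobs can still be accepted, provided the parser knows how the earlier layouts differed.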
Signed-off-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index d5281ac..51b8066 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -77,7 +77,7 @@ #define SND_SOC_TPLG_NUM_TEXTS 16 /* ABI version */ -#define SND_SOC_TPLG_ABI_VERSION 0x2 +#define SND_SOC_TPLG_ABI_VERSION 0x3 /* Max size of TLV data */ #define SND_SOC_TPLG_TLV_SIZE 32 @@ -97,7 +97,8 @@ #define SND_SOC_TPLG_TYPE_PCM 7 #define SND_SOC_TPLG_TYPE_MANIFEST 8 #define SND_SOC_TPLG_TYPE_CODEC_LINK 9 -#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK +#define SND_SOC_TPLG_TYPE_PDATA 10 +#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA /* vendor block IDs - please add new vendor types to end */ #define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 -- cgit v0.10.2 From cb92205bad2e4dd630b884142dd707b72504c200 Mon Sep 17 00:00:00 2001 From: Jakub Pawlowski Date: Wed, 5 Aug 2015 23:16:29 +0200 Subject: Bluetooth: fix MGMT_EV_NEW_LONG_TERM_KEY event This patch fixes how the MGMT_EV_NEW_LONG_TERM_KEY event is built. Right now the val field is filled with only 1 byte, instead of the whole value. This bug was introduced in commit 1fc62c526a57 ("Bluetooth: Fix exposing full value of shortened LTKs") Before that patch, if you paired with device using bluetoothd using simple pairing, and then restarted bluetoothd, you would be able to re-connect, but device would fail to establish encryption and would terminate connection. After this patch connecting after bluetoothd restart works fine. Signed-off-by: Jakub Pawlowski Signed-off-by: Marcel Holtmann diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7998fb2..92720f3 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) /* Make sure we copy only the significant bytes based on the * encryption key size, and set the rest of the value to zeroes. */ - memcpy(ev.key.val, key->val, sizeof(key->enc_size)); + memcpy(ev.key.val, key->val, key->enc_size); memset(ev.key.val + key->enc_size, 0, sizeof(ev.key.val) - key->enc_size); -- cgit v0.10.2 From 047fe6e6db9161e69271f56daaafdaf2add023b1 Mon Sep 17 00:00:00 2001 From: David Weinehall Date: Tue, 4 Aug 2015 16:55:52 +0300 Subject: drm/i915: Allow parsing of variable size child device entries from VBT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VBT version 196 increased the size of common_child_dev_config. The parser code assumed that the size of this structure would not change. The modified code now copies the amount needed based on the VBT version, and emits a debug message if the VBT version is unknown (too new); since the struct config block won't shrink in newer versions it should be harmless to copy the maximum known size in such cases, so that's what we do, but emitting the warning is probably sensible anyway. In the longer run it might make sense to modify the parser code to use a version/feature mapping, rather than hardcoding things like this, but for now the variants are fairly manageable. This fixes a regression introduced in commit 90e4f1592bb6e82f6690f0e05a8aadcf04d7bce7 Author: Ville Syrjälä Date: Wed Mar 25 18:45:58 2015 +0200 drm/i915: Fix the VBT child device parsing for BSW since we're hitting a DRM_ERROR on older platforms with this. v2: Stricter size checks Signed-off-by: David Weinehall [danvet: Fixup format string.]
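The version/size mapping suggested above can be sketched as a small lookup table. The sizes are the ones hardcoded in the patch below (33, 37 and 38 bytes); everything else here is a hypothetical illustration, not the driver's actual code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct bdb_child_size {
	uint16_t min_version;	/* first BDB version this size applies to */
	uint8_t size;		/* child_device_config size in bytes */
};

/* Same sizes as the if/else chain in the patch below. */
static const struct bdb_child_size child_sizes[] = {
	{   0, 33 },	/* anything older than 195 */
	{ 195, 37 },
	{ 196, 38 },	/* 196, 197 and (assumed) anything newer */
};

static uint8_t expected_child_size(uint16_t bdb_version)
{
	uint8_t size = child_sizes[0].size;
	size_t i;

	for (i = 0; i < sizeof(child_sizes) / sizeof(child_sizes[0]); i++)
		if (bdb_version >= child_sizes[i].min_version)
			size = child_sizes[i].size;
	return size;
}

int main(void)
{
	uint16_t v;

	for (v = 194; v <= 198; v++)
		printf("BDB %u -> expected child size %u\n",
		       (unsigned)v, (unsigned)expected_child_size(v));
	return 0;
}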
Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 198fc3c..3dcd59e 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv, const union child_device_config *p_child; union child_device_config *child_dev_ptr; int i, child_device_num, count; - u16 block_size; + u8 expected_size; + u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } - if (p_defs->child_dev_size < sizeof(*p_child)) { - DRM_ERROR("General definiton block child device size is too small.\n"); + if (bdb->version < 195) { + expected_size = 33; + } else if (bdb->version == 195) { + expected_size = 37; + } else if (bdb->version <= 197) { + expected_size = 38; + } else { + expected_size = 38; + DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n", + expected_size, bdb->version); + } + + if (expected_size > sizeof(*p_child)) { + DRM_ERROR("child_device_config cannot fit in p_child\n"); + return; + } + + if (p_defs->child_dev_size != expected_size) { + DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n", + p_defs->child_dev_size, expected_size, bdb->version); return; } /* get the block size of general definitions */ @@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, child_dev_ptr = dev_priv->vbt.child_dev + count; count++; - memcpy(child_dev_ptr, p_child, sizeof(*p_child)); + memcpy(child_dev_ptr, p_child, p_defs->child_dev_size); } return; } -- cgit v0.10.2 From c05f9429e12da7c7de2649ef8c8c16bf8c12061f Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 3 Aug 2015 14:44:29 +0800 Subject: btrfs: qgroup: Fix a regression in qgroup reserved space. During the change to new btrfs extent-oriented qgroup implement, due to it doesn't use the old __qgroup_excl_accounting() for exclusive extent, it didn't free the reserved bytes. The bug will cause limit function go crazy as the reserved space is never freed, increasing limit will have no effect and still cause EQOUT. The fix is easy, just free reserved bytes for newly created exclusive extent as what it does before. Reported-by: Tsutomu Itoh Signed-off-by: Yang Dongsheng Signed-off-by: Qu Wenruo Signed-off-by: Chris Mason diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index e9ace09..8a82029 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, /* Exclusive -> exclusive, nothing changed */ } } + + /* For exclusive extent, free its reserved bytes too */ + if (nr_old_roots == 0 && nr_new_roots == 1 && + cur_new_count == nr_new_roots) + qg->reserved -= num_bytes; if (dirty) qgroup_dirty(fs_info, qg); } -- cgit v0.10.2 From de54b9ac253787c366bbfb28d901a31954eb3511 Mon Sep 17 00:00:00 2001 From: Marcus Gelderie Date: Thu, 6 Aug 2015 15:46:10 -0700 Subject: ipc: modify message queue accounting to not take kernel data structures into account A while back, the message queue implementation in the kernel was improved to use btrees to speed up retrieval of messages, in commit d6629859b36d ("ipc/mqueue: improve performance of send/recv"). 
That patch introducing the improved kernel handling of message queues (using btrees) has, as a by-product, changed the meaning of the QSIZE field in the pseudo-file created for the queue. Before, this field reflected the size of the user-data in the queue. Since then, it also takes kernel data structures into account. For example, if 13 bytes of user data are in the queue, on my machine the file reports a size of 61 bytes. There was some discussion on this topic before (for example https://lkml.org/lkml/2014/10/1/115). Commenting on a thread on lkml, Michael Kerrisk gave the following background (https://lkml.org/lkml/2015/6/16/74): The pseudofiles in the mqueue filesystem (usually mounted at /dev/mqueue) expose fields with metadata describing a message queue. One of these fields, QSIZE, as originally implemented, showed the total number of bytes of user data in all messages in the message queue, and this feature was documented from the beginning in the mq_overview(7) page. In 3.5, some other (useful) work happened to break the user-space API in a couple of places, including the value exposed via QSIZE, which now includes a measure of kernel overhead bytes for the queue, a figure that renders QSIZE useless for its original purpose, since there's no way to deduce the number of overhead bytes consumed by the implementation. (The other user-space breakage was subsequently fixed.) This patch removes the accounting of kernel data structures in the queue. Reporting the size of these data-structures in the QSIZE field was a breaking change (see Michael's comment above). Without the QSIZE field reporting the total size of user-data in the queue, there is no way to deduce this number. It should be noted that the resource limit RLIMIT_MSGQUEUE is counted against the worst-case size of the queue (in both the old and the new implementation). Therefore, the kernel overhead accounting in QSIZE is not necessary to help the user understand the limitations RLIMIT imposes on the processes.
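The 13-byte example above is easy to reproduce from userspace. A minimal sketch (queue name and sizes are arbitrary, /dev/mqueue must be mounted, link with -lrt on older glibc) that enqueues 13 bytes and then prints the pseudo-file line containing QSIZE:

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
	mqd_t q = mq_open("/qsize-demo", O_CREAT | O_RDWR, 0600, &attr);
	char line[256];
	FILE *f;

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	mq_send(q, "thirteen byte", 13, 0);	/* 13 bytes of user data */

	f = fopen("/dev/mqueue/qsize-demo", "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%s", line);	/* QSIZE:13 with this patch, larger before it */
	if (f)
		fclose(f);

	mq_close(q);
	mq_unlink("/qsize-demo");
	return 0;
}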
Signed-off-by: Marcus Gelderie Acked-by: Doug Ledford Acked-by: Michael Kerrisk Acked-by: Davidlohr Bueso Cc: David Howells Cc: Alexander Viro Cc: John Duffy Cc: Arto Bendiken Cc: Manfred Spraul Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/ipc/mqueue.c b/ipc/mqueue.c index a24ba9f..161a180 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); - info->qsize += sizeof(*leaf); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); @@ -187,7 +186,6 @@ try_again: "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -200,7 +198,6 @@ try_again: if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); new_leaf = NULL; } else { kfree(new_leaf); @@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); } else { kfree(new_leaf); } -- cgit v0.10.2 From 7ace99170789bc53cbb7e9e352d7a3851208fbcf Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 6 Aug 2015 15:46:13 -0700 Subject: mm, meminit: allow early_pfn_to_nid to be used during runtime early_pfn_to_nid() historically was inherently not SMP safe but only used during boot which is inherently single threaded or during hotplug which is protected by a giant mutex. With deferred memory initialisation there was a thread-safe version introduced and early_pfn_to_nid() would trigger a BUG_ON if used unsafely. Memory hotplug hit that check. This patch introduces a lock in early_pfn_to_nid() to make it safe to use during hotplug.
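The shape of the fix is the usual one for a shared lookup cache that suddenly gains concurrent callers: take a lock around the cache probe and refill, and keep the old "return node 0 on failure" fallback. A rough userspace analogue, not the kernel code itself (a pthread mutex stands in for the new spinlock, and a trivial one-entry cache stands in for mminit_pfnnid_cache):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct {
	long key;
	int val;
	int valid;
} cache;

/* Stand-in for the expensive range walk done by the real lookup. */
static int slow_lookup(long key)
{
	return (int)(key % 4);
}

static int cached_lookup(long key)
{
	int val;

	pthread_mutex_lock(&cache_lock);
	if (cache.valid && cache.key == key) {
		val = cache.val;
	} else {
		val = slow_lookup(key);
		cache.key = key;
		cache.val = val;
		cache.valid = 1;
	}
	pthread_mutex_unlock(&cache_lock);

	if (val < 0)
		val = 0;	/* mirror the "just return 0" fallback */
	return val;
}

int main(void)
{
	printf("node %d\n", cached_lookup(42));
	return 0;
}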
Signed-off-by: Mel Gorman Reported-by: Alex Ng Tested-by: Alex Ng Acked-by: Peter Zijlstra (Intel) Cc: Nicolai Stange Cc: Dave Hansen Cc: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef19f22..ea0e6a6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -981,21 +981,21 @@ static void __init __free_pages_boot_core(struct page *page, #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) -/* Only safe to use early in boot when initialisation is single-threaded */ + static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; int __meminit early_pfn_to_nid(unsigned long pfn) { + static DEFINE_SPINLOCK(early_pfn_lock); int nid; - /* The system will behave unpredictably otherwise */ - BUG_ON(system_state != SYSTEM_BOOTING); - + spin_lock(&early_pfn_lock); nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); - if (nid >= 0) - return nid; - /* just returns 0 */ - return 0; + if (nid < 0) + nid = 0; + spin_unlock(&early_pfn_lock); + + return nid; } #endif -- cgit v0.10.2 From d3cd131d935ab3bab700491edbbd7cad4040ce50 Mon Sep 17 00:00:00 2001 From: Nicolai Stange Date: Thu, 6 Aug 2015 15:46:16 -0700 Subject: mm, meminit: replace rwsem with completion Commit 0e1cc95b4cc7 ("mm: meminit: finish initialisation of struct pages before basic setup") introduced a rwsem to signal completion of the initialization workers. Lockdep complains about possible recursive locking: ============================================= [ INFO: possible recursive locking detected ] 4.1.0-12802-g1dc51b8 #3 Not tainted --------------------------------------------- swapper/0/1 is trying to acquire lock: (pgdat_init_rwsem){++++.+}, at: [] page_alloc_init_late+0xc7/0xe6 but task is already holding lock: (pgdat_init_rwsem){++++.+}, at: [] page_alloc_init_late+0x3e/0xe6 Replace the rwsem by a completion together with an atomic "outstanding work counter". 
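The replacement pattern, an atomic count of outstanding workers plus a single wakeup once it reaches zero, has a direct userspace analogue. A minimal sketch with C11 atomics and a POSIX semaphore standing in for the kernel completion (build with -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int outstanding = NWORKERS;
static sem_t all_done;

/* Last worker to finish wakes the waiter, like pgdat_init_report_one_done(). */
static void report_one_done(void)
{
	if (atomic_fetch_sub(&outstanding, 1) == 1)
		sem_post(&all_done);
}

static void *worker(void *arg)
{
	(void)arg;
	/* ... per-thread initialisation work would go here ... */
	report_one_done();
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];
	int i;

	sem_init(&all_done, 0, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	sem_wait(&all_done);	/* analogue of wait_for_completion() */
	printf("all workers reported done\n");

	for (i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	sem_destroy(&all_done);
	return 0;
}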
[peterz@infradead.org: Barrier removal on the grounds of being pointless] [mgorman@suse.de: Applied review feedback] Signed-off-by: Nicolai Stange Signed-off-by: Mel Gorman Acked-by: Peter Zijlstra (Intel) Cc: Dave Hansen Cc: Alex Ng Cc: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ea0e6a6..3226282 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page, __free_pages_boot_core(page, pfn, 0); } -static __initdata DECLARE_RWSEM(pgdat_init_rwsem); +/* Completion tracking for deferred_init_memmap() threads */ +static atomic_t pgdat_init_n_undone __initdata; +static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); + +static inline void __init pgdat_init_report_one_done(void) +{ + if (atomic_dec_and_test(&pgdat_init_n_undone)) + complete(&pgdat_init_all_done_comp); +} /* Initialise remaining memory on a node */ static int __init deferred_init_memmap(void *data) @@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data) const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (first_init_pfn == ULONG_MAX) { - up_read(&pgdat_init_rwsem); + pgdat_init_report_one_done(); return 0; } @@ -1177,7 +1184,8 @@ free_range: pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, jiffies_to_msecs(jiffies - start)); - up_read(&pgdat_init_rwsem); + + pgdat_init_report_one_done(); return 0; } @@ -1185,14 +1193,14 @@ void __init page_alloc_init_late(void) { int nid; + /* There will be num_node_state(N_MEMORY) threads */ + atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); for_each_node_state(nid, N_MEMORY) { - down_read(&pgdat_init_rwsem); kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); } /* Block until all are initialised */ - down_write(&pgdat_init_rwsem); - up_write(&pgdat_init_rwsem); + wait_for_completion(&pgdat_init_all_done_comp); } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ -- cgit v0.10.2 From 4248b0da460839e30eaaad78992b9a1dd3e63e21 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 6 Aug 2015 15:46:20 -0700 Subject: fs, file table: reinit files_stat.max_files after deferred memory initialisation Dave Hansen reported the following; My laptop has been behaving strangely with 4.2-rc2. Once I log in to my X session, I start getting all kinds of strange errors from applications and see this in my dmesg: VFS: file-max limit 8192 reached The problem is that the file-max is calculated before memory is fully initialised and miscalculates how much memory the kernel is using. This patch recalculates file-max after deferred memory initialisation. Note that using memory hotplug infrastructure would not have avoided this problem as the value is not recalculated after memory hot-add. 4.1: files_stat.max_files = 6582781 4.2-rc2: files_stat.max_files = 8192 4.2-rc2 patched: files_stat.max_files = 6562467 Small differences with the patch applied and 4.1 but not enough to matter. 
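The figures quoted above fall out of the heuristic now applied after boot: roughly one file per 1K, capped at 10% of non-reserved memory. A userspace approximation of the same arithmetic (sysconf values stand in for totalram_pages and nr_free_pages, so the result is only in the same ballpark as the kernel's):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	long total = sysconf(_SC_PHYS_PAGES);
	long avail = sysconf(_SC_AVPHYS_PAGES);
	long reserve = (total - avail) * 3 / 2;	/* the "150% of used memory" idea */
	long n;

	if (reserve > total - 1)
		reserve = total - 1;
	n = ((total - reserve) * (page_size / 1024)) / 10;

	printf("estimated files_stat.max_files: %ld\n", n);
	return 0;
}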
Signed-off-by: Mel Gorman Reported-by: Dave Hansen Cc: Nicolai Stange Cc: Dave Hansen Cc: Alex Ng Cc: Fengguang Wu Cc: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/dcache.c b/fs/dcache.c index 5c8ea15..9b5fe50 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void) inode_init_early(); } -void __init vfs_caches_init(unsigned long mempages) +void __init vfs_caches_init(void) { - unsigned long reserve; - - /* Base hash sizes on available memory, with a reserve equal to - 150% of current kernel size */ - - reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); - mempages -= reserve; - names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); dcache_init(); inode_init(); - files_init(mempages); + files_init(); + files_maxfiles_init(); mnt_init(); bdev_cache_init(); chrdev_init(); diff --git a/fs/file_table.c b/fs/file_table.c index 7f9d407..ad17e05 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -308,19 +309,24 @@ void put_filp(struct file *file) } } -void __init files_init(unsigned long mempages) +void __init files_init(void) { - unsigned long n; - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + percpu_counter_init(&nr_files, 0, GFP_KERNEL); +} - /* - * One file with associated inode and dcache is very roughly 1K. - * Per default don't use more than 10% of our memory for files. - */ +/* + * One file with associated inode and dcache is very roughly 1K. Per default + * do not use more than 10% of our memory for files. + */ +void __init files_maxfiles_init(void) +{ + unsigned long n; + unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2; + + memreserve = min(memreserve, totalram_pages - 1); + n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; - n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); - percpu_counter_init(&nr_files, 0, GFP_KERNEL); } diff --git a/include/linux/fs.h b/include/linux/fs.h index cc008c3..84b783f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -55,7 +55,8 @@ struct vm_fault; extern void __init inode_init(void); extern void __init inode_init_early(void); -extern void __init files_init(unsigned long); +extern void __init files_init(void); +extern void __init files_maxfiles_init(void); extern struct files_stat_struct files_stat; extern unsigned long get_max_files(void); @@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp); /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); -extern void __init vfs_caches_init(unsigned long); +extern void __init vfs_caches_init(void); extern struct kmem_cache *names_cachep; diff --git a/init/main.c b/init/main.c index c5d5626..5650655 100644 --- a/init/main.c +++ b/init/main.c @@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void) key_init(); security_init(); dbg_late_init(); - vfs_caches_init(totalram_pages); + vfs_caches_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3226282..cb61f44e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1201,6 +1201,9 @@ void __init page_alloc_init_late(void) /* Block until all are initialised */ wait_for_completion(&pgdat_init_all_done_comp); + + /* Reinit limits that are 
based on free pages after the kernel is up */ + files_maxfiles_init(); } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ -- cgit v0.10.2 From 209f7512d007980fd111a74a064d70a3656079cf Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 6 Aug 2015 15:46:23 -0700 Subject: ocfs2: fix BUG in ocfs2_downconvert_thread_do_work() The "BUG_ON(list_empty(&osb->blocked_lock_list))" in ocfs2_downconvert_thread_do_work can be triggered in the following case: ocfs2dc has firstly saved osb->blocked_lock_count to the local variable processed, and then processes the dentry lockres. During the dentry put, it calls iput and then deletes rw, inode and open lockres from blocked list in ocfs2_mark_lockres_freeing. And this causes the variable `processed' to not reflect the number of blocked lockres to be processed, which triggers the BUG. Signed-off-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8b23aa2..23157e4 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) osb->dc_work_sequence = osb->dc_wake_sequence; processed = osb->blocked_lock_count; - while (processed) { - BUG_ON(list_empty(&osb->blocked_lock_list)); - + /* + * blocked lock processing in this loop might call iput which can + * remove items off osb->blocked_lock_list. Downconvert up to + * 'processed' number of locks, but stop short if we had some + * removed in ocfs2_mark_lockres_freeing when downconverting. + */ + while (processed && !list_empty(&osb->blocked_lock_list)) { lockres = list_entry(osb->blocked_lock_list.next, struct ocfs2_lock_res, l_blocked_list); list_del_init(&lockres->l_blocked_list); -- cgit v0.10.2 From 3c00cb5e68dc719f2fc73a33b1b230aadfcb1309 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 6 Aug 2015 15:46:26 -0700 Subject: signal: fix information leak in copy_siginfo_from_user32 This function can leak kernel stack data when the user siginfo_t has a positive si_code value. The top 16 bits of si_code describe which fields in the siginfo_t union are active, but they are treated inconsistently between copy_siginfo_from_user32, copy_siginfo_to_user32 and copy_siginfo_to_user. copy_siginfo_from_user32 is called from rt_sigqueueinfo and rt_tgsigqueueinfo in which the user has full control over the top 16 bits of si_code. This fixes the following information leaks: x86: 8 bytes leaked when sending a signal from a 32-bit process to itself. This leak grows to 16 bytes if the process uses x32. (si_code = __SI_CHLD) x86: 100 bytes leaked when sending a signal from a 32-bit process to a 64-bit process. (si_code = -1) sparc: 4 bytes leaked when sending a signal from a 32-bit process to a 64-bit process. (si_code = any) parisc and s390 have similar bugs, but they are not vulnerable because rt_[tg]sigqueueinfo have checks that prevent sending a positive si_code to a different process. These bugs are also fixed for consistency.
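The core of the leak is a stack structure that is only partially filled before being handed back out; anything not written keeps whatever was on the stack. A self-contained illustration (the struct is a stand-in, not siginfo_t, and a toy program may happen to print zeros anyway since nothing guarantees stack garbage):

#include <stdio.h>

struct info {
	int signo;
	int code;
	char pad[24];	/* fields that may or may not be filled in */
};

/* Mimics copying only the leading fields from userspace. */
static void fill_prefix(struct info *to)
{
	to->signo = 11;
	to->code  = 1;
}

static void dump(const char *tag, const struct info *i)
{
	const unsigned char *p = (const unsigned char *)i;
	size_t n;

	printf("%s:", tag);
	for (n = 0; n < sizeof(*i); n++)
		printf(" %02x", p[n]);
	printf("\n");
}

int main(void)
{
	struct info leaky;		/* like the old "siginfo_t info;" */
	struct info safe = {0};		/* like the fixed "siginfo_t info = {};" */

	fill_prefix(&leaky);
	fill_prefix(&safe);
	dump("uninitialized   ", &leaky);	/* trailing bytes are whatever was on the stack */
	dump("zero-initialized", &safe);
	return 0;
}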
Signed-off-by: Amanieu d'Antras Cc: Oleg Nesterov Cc: Ingo Molnar Cc: Russell King Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Chris Metcalf Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1670f15..81fd38f 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -201,8 +201,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE)) diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19a7705..5d7f263 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d3a831a..da50e0c 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index e8c2c04..c667e10 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) return -EFAULT; - memset(to, 0, sizeof(*to)); - err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); diff --git a/kernel/signal.c b/kernel/signal.c index 836df8d..00524cf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3017,7 +3017,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; @@ -3061,7 +3061,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; -- cgit v0.10.2 From 26135022f85105ad725cda103fa069e29e83bd16 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 6 Aug 2015 15:46:29 -0700 Subject: signal: fix information leak in copy_siginfo_to_user This function may copy the si_addr_lsb, si_lower and si_upper fields to user mode when they haven't been initialized, which can leak kernel stack data to user mode. Just checking the value of si_code is insufficient because the same si_code value is shared between multiple signals. This is solved by checking the value of si_signo in addition to si_code. 
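Because numeric si_code values are reused across different signals, the guard has to look at both fields. The same idea in isolation, with the BUS_* values defined locally so the sketch stays self-contained (the values mirror the generic siginfo definitions, which is an assumption to verify per architecture):

#include <signal.h>
#include <stdio.h>

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4	/* assumed value, mirrors asm-generic/siginfo.h */
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5	/* assumed value, mirrors asm-generic/siginfo.h */
#endif

struct out_info {
	int signo;
	int code;
	unsigned short addr_lsb;
};

/* Copy addr_lsb only when it is valid for this signal/code pair. */
static void copy_optional(struct out_info *to, int signo, int code,
			  unsigned short addr_lsb)
{
	to->signo = signo;
	to->code = code;
	to->addr_lsb = 0;
	if (signo == SIGBUS &&
	    (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO))
		to->addr_lsb = addr_lsb;
}

int main(void)
{
	struct out_info o;

	/* Same numeric code, wrong signal: the field must not be copied. */
	copy_optional(&o, SIGSEGV, BUS_MCEERR_AO, 12);
	printf("addr_lsb after SIGSEGV: %u\n", o.addr_lsb);

	copy_optional(&o, SIGBUS, BUS_MCEERR_AO, 12);
	printf("addr_lsb after SIGBUS:  %u\n", o.addr_lsb);
	return 0;
}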
Signed-off-by: Amanieu d'Antras Cc: Oleg Nesterov Cc: Ingo Molnar Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 81fd38f..948f0ad 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitely for the right codes here. */ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; diff --git a/kernel/signal.c b/kernel/signal.c index 00524cf..0f6bbbe 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. */ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif #ifdef SEGV_BNDERR - err |= __put_user(from->si_lower, &to->si_lower); - err |= __put_user(from->si_upper, &to->si_upper); + if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) { + err |= __put_user(from->si_lower, &to->si_lower); + err |= __put_user(from->si_upper, &to->si_upper); + } #endif break; case __SI_CHLD: -- cgit v0.10.2 From 3ead7c52bdb0ab44f4bb1feed505a8323cc12ba7 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 6 Aug 2015 15:46:33 -0700 Subject: signalfd: fix information leak in signalfd_copyinfo This function may copy the si_addr_lsb field to user mode when it hasn't been initialized, which can leak kernel stack data to user mode. Just checking the value of si_code is insufficient because the same si_code value is shared between multiple signals. This is solved by checking the value of si_signo in addition to si_code. Signed-off-by: Amanieu d'Antras Cc: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/signalfd.c b/fs/signalfd.c index 7e412ad..270221f 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. */ - if (kinfo->si_code == BUS_MCEERR_AR || - kinfo->si_code == BUS_MCEERR_AO) + if (kinfo->si_signo == SIGBUS && + (kinfo->si_code == BUS_MCEERR_AR || + kinfo->si_code == BUS_MCEERR_AO)) err |= __put_user((short) kinfo->si_addr_lsb, &uinfo->ssi_addr_lsb); #endif -- cgit v0.10.2 From 3e810ae2db76ccde770fd8e5a0de6408ea36e211 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 6 Aug 2015 15:46:36 -0700 Subject: mm/slub: allow merging when SLAB_DEBUG_FREE is set This patch fixes creation of new kmem-caches after enabling sanity_checks for existing mergeable kmem-caches in runtime: before that patch creation fails because unique name in sysfs already taken by existing kmem-cache. Unlike other debug options this doesn't change object layout and could be enabled and disabled at any time. 
Signed-off-by: Konstantin Khlebnikov Acked-by: Christoph Lameter Cc: Pekka Enberg Acked-by: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/slab_common.c b/mm/slab_common.c index 3e5f8f2..8683110 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache; SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_FAILSLAB) -#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_CACHE_DMA | SLAB_NOTRACK) +#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK) /* * Merge control. If this is set then no merging of slab caches will occur. -- cgit v0.10.2 From 447f6a95a9c80da7faaec3e66e656eab8f262640 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Thu, 6 Aug 2015 15:46:39 -0700 Subject: lib/iommu-common.c: do not use 0xffffffffffffffffl for computing align_mask Using a 64 bit constant generates "warning: integer constant is too large for 'long' type" on 32 bit platforms. Instead use ~0ul and BITS_PER_LONG. Detected by Andrew Morton on ARMD. Signed-off-by: Sowmini Varadhan Cc: Benjamin Herrenschmidt Cc: David S. Miller Cc: Guenter Roeck Cc: Rasmus Villemoes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/lib/iommu-common.c b/lib/iommu-common.c index df30632..ff19f66 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c @@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, unsigned long align_mask = 0; if (align_order > 0) - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = ~0ul >> (BITS_PER_LONG - align_order); /* Sanity check */ if (unlikely(npages == 0)) { -- cgit v0.10.2 From 8f2f3eb59dff4ec538de55f2e0592fec85966aab Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 6 Aug 2015 15:46:42 -0700 Subject: fsnotify: fix oops in fsnotify_clear_marks_by_group_flags() fsnotify_clear_marks_by_group_flags() can race with fsnotify_destroy_marks() so that when fsnotify_destroy_mark_locked() drops mark_mutex, a mark from the list iterated by fsnotify_clear_marks_by_group_flags() can be freed and thus the next entry pointer we have cached may become stale and we dereference free memory. Fix the problem by first moving marks to free to a special private list and then always free the first entry in the special list. This method is safe even when entries from the list can disappear once we drop the lock. Signed-off-by: Jan Kara Reported-by: Ashish Sangwan Reviewed-by: Ashish Sangwan Cc: Lino Sanfilippo Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 92e48c7..39ddcaf 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) { struct fsnotify_mark *lmark, *mark; + LIST_HEAD(to_free); + /* + * We have to be really careful here. Anytime we drop mark_mutex, e.g. + * fsnotify_clear_marks_by_inode() can come and free marks. Even in our + * to_free list so we have to use mark_mutex even when accessing that + * list. And freeing mark requires us to drop mark_mutex. So we can + * reliably free only the first mark in the list. That's why we first + * move marks to free to to_free list in one go and then free marks in + * to_free list one by one. 
+ */ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { - if (mark->flags & flags) { - fsnotify_get_mark(mark); - fsnotify_destroy_mark_locked(mark, group); - fsnotify_put_mark(mark); - } + if (mark->flags & flags) + list_move(&mark->g_list, &to_free); } mutex_unlock(&group->mark_mutex); + + while (1) { + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + if (list_empty(&to_free)) { + mutex_unlock(&group->mark_mutex); + break; + } + mark = list_first_entry(&to_free, struct fsnotify_mark, g_list); + fsnotify_get_mark(mark); + fsnotify_destroy_mark_locked(mark, group); + mutex_unlock(&group->mark_mutex); + fsnotify_put_mark(mark); + } } /* -- cgit v0.10.2 From 18896451eaeee497ef5c397d76902c6376a8787d Mon Sep 17 00:00:00 2001 From: David Kershner Date: Thu, 6 Aug 2015 15:46:45 -0700 Subject: kthread: export kthread functions The s-Par visornic driver, currently in staging, processes a queue being serviced by the an s-Par service partition. We can get a message that something has happened with the Service Partition, when that happens, we must not access the channel until we get a message that the service partition is back again. The visornic driver has a thread for processing the channel, when we get the message, we need to be able to park the thread and then resume it when the problem clears. We can do this with kthread_park and unpark but they are not exported from the kernel, this patch exports the needed functions. Signed-off-by: David Kershner Acked-by: Ingo Molnar Acked-by: Neil Horman Acked-by: Thomas Gleixner Cc: Richard Weinberger Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/kernel/kthread.c b/kernel/kthread.c index 10e489c..fdea0be 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -97,6 +97,7 @@ bool kthread_should_park(void) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); } +EXPORT_SYMBOL_GPL(kthread_should_park); /** * kthread_freezable_should_stop - should this freezable kthread return now? @@ -171,6 +172,7 @@ void kthread_parkme(void) { __kthread_parkme(to_kthread(current)); } +EXPORT_SYMBOL_GPL(kthread_parkme); static int kthread(void *_create) { @@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k) if (kthread) __kthread_unpark(k, kthread); } +EXPORT_SYMBOL_GPL(kthread_unpark); /** * kthread_park - park a thread created by kthread_create(). @@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k) } return ret; } +EXPORT_SYMBOL_GPL(kthread_park); /** * kthread_stop - stop a thread created by kthread_create(). -- cgit v0.10.2 From 32e5a2a2be6b085febaac36efff495ad65a55e6c Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 6 Aug 2015 15:46:48 -0700 Subject: ocfs2: fix shift left overflow When using a large volume, for example 9T volume with 2T already used, frequent creation of small files with O_DIRECT when the IO is not cluster aligned may clear sectors in the wrong place. This will cause filesystem corruption. This is because p_cpos is a u32. When calculating the corresponding sector it should be converted to u64 first, otherwise it may overflow. 
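The arithmetic bug is easy to see in isolation: a 32-bit p_cpos shifted left is evaluated in 32-bit math and wraps before the assignment, unless one operand is widened first. A minimal demonstration (the shift count 11 simply stands in for s_clustersize_bits - 9 with 1MB clusters):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t p_cpos = 3000000;	/* cluster offset large enough to overflow */
	unsigned int shift = 11;	/* e.g. 1MB clusters: 20 - 9 */

	uint64_t wrong = p_cpos << shift;		/* evaluated in 32 bits, wraps */
	uint64_t right = (uint64_t)p_cpos << shift;	/* widened first, as in the fix */

	printf("wrong: %" PRIu64 "\n", wrong);	/* 1849032704 */
	printf("right: %" PRIu64 "\n", right);	/* 6144000000 */
	return 0;
}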
Signed-off-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: [4.0+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1a35c61..0f5fd9d 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { u64 s = i_size_read(inode); - sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + + sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + (do_div(s, osb->s_clustersize) >> 9); ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, @@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); ret = blkdev_issue_zeroout(osb->sb->s_bdev, - p_cpos << (osb->s_clustersize_bits - 9), + (u64)p_cpos << (osb->s_clustersize_bits - 9), zero_len_head >> 9, GFP_NOFS, false); if (ret < 0) mlog_errno(ret); -- cgit v0.10.2 From e298ff75f133f2524bb6a9a305b17c5f6ff1a6b2 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 6 Aug 2015 15:46:51 -0700 Subject: mm: initialize hotplugged pages as reserved Commit 92923ca3aace ("mm: meminit: only set page reserved in the memblock region") broke memory hotplug which expects the memmap for newly added sections to be reserved until onlined by online_pages_range(). This patch marks hotplugged pages as reserved when adding new zones. Signed-off-by: Mel Gorman Reported-by: David Vrabel Tested-by: David Vrabel Cc: Nathan Zimmer Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 26fbba7..003dbe4 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) int nr_pages = PAGES_PER_SECTION; int nid = pgdat->node_id; int zone_type; - unsigned long flags; + unsigned long flags, pfn; int ret; zone_type = zone - pgdat->node_zones; @@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) pgdat_resize_unlock(zone->zone_pgdat, &flags); memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn, MEMMAP_HOTPLUG); + + /* online_page_range is called later and expects pages reserved */ + for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) { + if (!pfn_valid(pfn)) + continue; + + SetPageReserved(pfn_to_page(pfn)); + } return 0; } -- cgit v0.10.2 From e1832f2923ec92d0e590e496c8890675457f8568 Mon Sep 17 00:00:00 2001 From: Stephen Smalley Date: Thu, 6 Aug 2015 15:46:55 -0700 Subject: ipc: use private shmem or hugetlbfs inodes for shm segments. The shm implementation internally uses shmem or hugetlbfs inodes for shm segments. As these inodes are never directly exposed to userspace and only accessed through the shm operations which are already hooked by security modules, mark the inodes with the S_PRIVATE flag so that inode security initialization and permission checking is skipped. This was motivated by the following lockdep warning: ====================================================== [ INFO: possible circular locking dependency detected ] 4.2.0-0.rc3.git0.1.fc24.x86_64+debug #1 Tainted: G W ------------------------------------------------------- httpd/1597 is trying to acquire lock: (&ids->rwsem){+++++.}, at: shm_close+0x34/0x130 but task is already holding lock: (&mm->mmap_sem){++++++}, at: SyS_shmdt+0x4b/0x180 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #3 (&mm->mmap_sem){++++++}: lock_acquire+0xc7/0x270 __might_fault+0x7a/0xa0 filldir+0x9e/0x130 xfs_dir2_block_getdents.isra.12+0x198/0x1c0 [xfs] xfs_readdir+0x1b4/0x330 [xfs] xfs_file_readdir+0x2b/0x30 [xfs] iterate_dir+0x97/0x130 SyS_getdents+0x91/0x120 entry_SYSCALL_64_fastpath+0x12/0x76 -> #2 (&xfs_dir_ilock_class){++++.+}: lock_acquire+0xc7/0x270 down_read_nested+0x57/0xa0 xfs_ilock+0x167/0x350 [xfs] xfs_ilock_attr_map_shared+0x38/0x50 [xfs] xfs_attr_get+0xbd/0x190 [xfs] xfs_xattr_get+0x3d/0x70 [xfs] generic_getxattr+0x4f/0x70 inode_doinit_with_dentry+0x162/0x670 sb_finish_set_opts+0xd9/0x230 selinux_set_mnt_opts+0x35c/0x660 superblock_doinit+0x77/0xf0 delayed_superblock_init+0x10/0x20 iterate_supers+0xb3/0x110 selinux_complete_init+0x2f/0x40 security_load_policy+0x103/0x600 sel_write_load+0xc1/0x750 __vfs_write+0x37/0x100 vfs_write+0xa9/0x1a0 SyS_write+0x58/0xd0 entry_SYSCALL_64_fastpath+0x12/0x76 ... Signed-off-by: Stephen Smalley Reported-by: Morten Stevens Acked-by: Hugh Dickins Acked-by: Paul Moore Cc: Manfred Spraul Cc: Davidlohr Bueso Cc: Prarit Bhargava Cc: Eric Paris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 0cf74df..973c24c 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out_dentry; + if (creat_flags == HUGETLB_SHMFS_INODE) + inode->i_flags |= S_PRIVATE; file = ERR_PTR(-ENOMEM); if (hugetlb_reserve_pages(inode, 0, diff --git a/ipc/shm.c b/ipc/shm.c index 06e5cf2..4aef24d 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; - file = shmem_file_setup(name, size, acctflag); + file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) diff --git a/mm/shmem.c b/mm/shmem.c index 4caf8ed..dbe0c1e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3363,8 +3363,8 @@ put_path: * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be * kernel internal. There will be NO LSM permission checks against the * underlying inode. So users of this interface must do LSM checks at a - * higher layer. The one user is the big_key implementation. LSM checks - * are provided at the key level rather than the inode level. + * higher layer. The users are the big_key and shm implementations. LSM + * checks are provided at the key or shm level rather than the inode. * @name: name for dentry (to be seen in /proc//maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size -- cgit v0.10.2 From a09233f3e1b77dbf50851660533e008056553a2a Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 6 Aug 2015 15:46:58 -0700 Subject: mm/memory-failure: unlock_page before put_page Recently I addressed a few of hwpoison race problems and the patches are merged on v4.2-rc1. It made progress, but unfortunately some problems still remain due to less coverage of my testing. So I'm trying to fix or avoid them in this series. One point I'm expecting to discuss is that patch 4/5 changes the page flag set to be checked on free time. In current behavior, __PG_HWPOISON is not supposed to be set when the page is freed. 
I think that there is no strong reason for this behavior, and it causes a problem hard to fix only in error handler side (because __PG_HWPOISON could be set at arbitrary timing.) So I suggest to change it. With this patchset, hwpoison stress testing in official mce-test testsuite (which previously failed) passes. This patch (of 5): In "just unpoisoned" path, we do put_page and then unlock_page, which is a wrong order and causes "freeing locked page" bug. So let's fix it. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Dean Nelson Cc: Tony Luck Cc: "Kirill A. Shutemov" Cc: Hugh Dickins Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c53543d..04d6770 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1209,9 +1209,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); atomic_long_sub(nr_pages, &num_poisoned_pages); + unlock_page(hpage); put_page(hpage); - res = 0; - goto out; + return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) -- cgit v0.10.2 From a209ef09af0dc921311d0cc4a1d4f926321d91b8 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 6 Aug 2015 15:47:01 -0700 Subject: mm/memory-failure: fix race in counting num_poisoned_pages When memory_failure() is called on a page which are just freed after page migration from soft offlining, the counter num_poisoned_pages is raised twi= ce. So let's fix it with using TestSetPageHWPoison. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Dean Nelson Cc: Tony Luck Cc: "Kirill A. Shutemov" Cc: Hugh Dickins Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 04d6770..f72d2fa 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1671,8 +1671,8 @@ static int __soft_offline_page(struct page *page, int flags) if (ret > 0) ret = -EIO; } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", -- cgit v0.10.2 From 98ed2b0052e68420f1bad6c81e3f2600d25023e7 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 6 Aug 2015 15:47:04 -0700 Subject: mm/memory-failure: give up error handling for non-tail-refcounted thp "non anonymous thp" case is still racy with freeing thp, which causes panic due to put_page() for refcount-0 page. It seems that closing up this race might be hard (and/or not worth doing,) so let's give up the error handling for this case. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Dean Nelson Cc: Tony Luck Cc: "Kirill A. Shutemov" Cc: Hugh Dickins Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f72d2fa..cd98553 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page) * directly for tail pages. */ if (PageTransHuge(head)) { + /* + * Non anonymous thp exists only in allocation/free time. We + * can't handle such a case correctly, so let's give it up. + * This should be better than triggering BUG_ON when kernel + * tries to touch the "partially handled" page. 
+ */ + if (!PageAnon(head)) { + pr_err("MCE: %#lx: non anonymous thp\n", + page_to_pfn(page)); + return 0; + } + if (get_page_unless_zero(head)) { if (PageTail(page)) get_page(page); @@ -1134,15 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) } if (!PageHuge(p) && PageTransHuge(hpage)) { - if (!PageAnon(hpage)) { - pr_err("MCE: %#lx: non anonymous thp\n", pfn); - if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); - put_page(p); - if (p != hpage) - put_page(hpage); - return -EBUSY; - } if (unlikely(split_huge_page(hpage))) { pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) -- cgit v0.10.2 From f4c18e6f7b5bbb5b528b3334115806b0d76f50f9 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 6 Aug 2015 15:47:08 -0700 Subject: mm: check __PG_HWPOISON separately from PAGE_FLAGS_CHECK_AT_* The race condition addressed in commit add05cecef80 ("mm: soft-offline: don't free target page in successful page migration") was not closed completely, because that can happen not only for soft-offline, but also for hard-offline. Consider that a slab page is about to be freed into buddy pool, and then an uncorrected memory error hits the page just after entering __free_one_page(), then VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP) is triggered, despite the fact that it's not necessary because the data on the affected page is not consumed. To solve it, this patch drops __PG_HWPOISON from page flag checks at allocation/free time. I think it's justified because __PG_HWPOISON flags is defined to prevent the page from being reused, and setting it outside the page's alloc-free cycle is a designed behavior (not a bug.) For recent months, I was annoyed about BUG_ON when soft-offlined page remains on lru cache list for a while, which is avoided by calling put_page() instead of putback_lru_page() in page migration's success path. This means that this patch reverts a major change from commit add05cecef80 about the new refcounting rule of soft-offlined pages, so "reuse window" revives. This will be closed by a subsequent patch. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Dean Nelson Cc: Tony Luck Cc: "Kirill A. Shutemov" Cc: Hugh Dickins Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f34e040..41c9384 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ + 1 << PG_unevictable | __PG_MLOCKED | \ __PG_COMPOUND_LOCK) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have any flags set. It they are set, + * Pages being prepped should not have these flags set. It they are set, * there has been a kernel bug or struct page corruption. + * + * __PG_HWPOISON is exceptional because it needs to be kept beyond page's + * alloc-free cycle to prevent from reusing the page. 
*/ -#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) +#define PAGE_FLAGS_CHECK_AT_PREP \ + (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1 << PG_private | 1 << PG_private_2) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c107094..097c7a4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page, /* after clearing PageTail the gup refcount can be released */ smp_mb__after_atomic(); - /* - * retain hwpoison flag of the poisoned tail page: - * fix for the unsuitable process killed on Guest Machine(KVM) - * by the memory-failure. - */ - page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; + page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; page_tail->flags |= (page->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | diff --git a/mm/migrate.c b/mm/migrate.c index ee401e4..f2415be 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -950,7 +950,10 @@ out: list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (reason != MR_MEMORY_FAILURE) + /* Soft-offlined page shouldn't go through lru cache list */ + if (reason == MR_MEMORY_FAILURE) + put_page(page); + else putback_lru_page(page); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cb61f44e..beda417 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1296,6 +1296,10 @@ static inline int check_new_page(struct page *page) bad_reason = "non-NULL mapping"; if (unlikely(atomic_read(&page->_count) != 0)) bad_reason = "nonzero _count"; + if (unlikely(page->flags & __PG_HWPOISON)) { + bad_reason = "HWPoisoned (hardware-corrupted)"; + bad_flags = __PG_HWPOISON; + } if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; -- cgit v0.10.2 From 4491f7126063ef51081f5662bd4fcae31621a333 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 6 Aug 2015 15:47:11 -0700 Subject: mm/memory-failure: set PageHWPoison before migrate_pages() Now page freeing code doesn't consider PageHWPoison as a bad page, so by setting it before completing the page containment, we can prevent the error page from being reused just after successful page migration. I added TTU_IGNORE_HWPOISON for try_to_unmap() to make sure that the page table entry is transformed into migration entry, not to hwpoison entry. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Dean Nelson Cc: Tony Luck Cc: "Kirill A. 
Shutemov" Cc: Hugh Dickins Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory-failure.c b/mm/memory-failure.c index cd98553..ea5a936 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1659,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags) inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1673,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags) pfn, ret, page->flags); if (ret > 0) ret = -EIO; - } else { - if (!TestSetPageHWPoison(page)) - atomic_long_inc(&num_poisoned_pages); + if (TestClearPageHWPoison(page)) + atomic_long_dec(&num_poisoned_pages); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", diff --git a/mm/migrate.c b/mm/migrate.c index f2415be..eb42671 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes or remove ptes */ if (page_mapped(page)) { try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| + TTU_IGNORE_HWPOISON); page_was_mapped = 1; } -- cgit v0.10.2 From a50fcb512d9539b5179b1e523641420339086995 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Thu, 6 Aug 2015 15:47:14 -0700 Subject: writeback: fix initial dirty limit The initial value of global_wb_domain.dirty_limit set by writeback_set_ratelimit() is zeroed out by the memset in wb_domain_init(). Signed-off-by: Rabin Vincent Acked-by: Tejun Heo Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 22cddd3..5cccc12 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = { */ void __init page_writeback_init(void) { + BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); + writeback_set_ratelimit(); register_cpu_notifier(&ratelimit_nb); - - BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); } /** -- cgit v0.10.2 From 14d2b7c1a96ef37eb571599c73d4a1a606b964d6 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Mon, 3 Aug 2015 17:50:11 +0200 Subject: net: fec: fix initial runtime PM refcount MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The clocks are initially active and thus the device is marked active. This still keeps the PM refcount at 0, the pm_runtime_put_autosuspend() call at the end of probe then leaves us with an invalid refcount of -1, which in turn leads to the device staying in suspended state even though netdev open had been called. Fix this by initializing the refcount to be coherent with the initial device status. Fixes: 8fff755e9f8 (net: fec: Ensure clocks are enabled while using mdio bus) Signed-off-by: Lucas Stach Tested-by: Uwe Kleine-König Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 32e3807c..271bb58 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); -- cgit v0.10.2 From 44922150d87cef616fd183220d43d8fde4d41390 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 6 Aug 2015 19:13:25 -0700 Subject: sparc64: Fix userspace FPU register corruptions. If we have a series of events from userspace, with %fprs=FPRS_FEF, as follows: ETRAP ETRAP VIS_ENTRY(fprs=0x4) VIS_EXIT RTRAP (kernel FPU restore with fpu_saved=0x4) RTRAP We will not restore the user registers that were clobbered by the FPU-using kernel code in the inner-most trap. Traps allocate FPU save slots in the thread struct, and FPU-using sequences save the "dirty" FPU registers only. This works at the initial trap level because all of the registers get recorded into the top-level FPU save area, and we'll return to userspace with the FPU disabled so that any FPU use by the user will take an FPU disabled trap wherein we'll load the registers back up properly. But this is not how trap returns from kernel to kernel operate. The simplest fix for this bug is to always save all FPU register state for anything other than the top-most FPU save area. Getting rid of the optimized inner-slot FPU saving code ends up making VISEntryHalf degenerate into plain VISEntry. Longer term we need to do something smarter to reinstate the partial save optimizations. Perhaps the fundamental error is having trap entry and exit allocate FPU save slots and restore register state. Instead, the VISEntry et al. calls should be doing that work. This bug is about two decades old. Reported-by: James Y Knight Signed-off-by: David S.
Miller diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h index 1f0aa20..6424249 100644 --- a/arch/sparc/include/asm/visasm.h +++ b/arch/sparc/include/asm/visasm.h @@ -28,16 +28,10 @@ * Must preserve %o5 between VISEntryHalf and VISExitHalf */ #define VISEntryHalf \ - rd %fprs, %o5; \ - andcc %o5, FPRS_FEF, %g0; \ - be,pt %icc, 297f; \ - sethi %hi(298f), %g7; \ - sethi %hi(VISenterhalf), %g1; \ - jmpl %g1 + %lo(VISenterhalf), %g0; \ - or %g7, %lo(298f), %g7; \ - clr %o5; \ -297: wr %o5, FPRS_FEF, %fprs; \ -298: + VISEntry + +#define VISExitHalf \ + VISExit #define VISEntryHalfFast(fail_label) \ rd %fprs, %o5; \ @@ -47,7 +41,7 @@ ba,a,pt %xcc, fail_label; \ 297: wr %o5, FPRS_FEF, %fprs; -#define VISExitHalf \ +#define VISExitHalfFast \ wr %o5, 0, %fprs; #ifndef __ASSEMBLY__ diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 140527a..83aeeb1 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) +#ifdef NON_USER_COPY + VISExitHalfFast +#else VISExitHalf - +#endif brz,pn %o2, .Lexit cmp %o2, 19 ble,pn %icc, .Lsmall_unaligned diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index b320ae9..a063d84 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 stx %g3, [%g6 + TI_GSR] 2: add %g6, %g1, %g3 - cmp %o5, FPRS_DU - be,pn %icc, 6f - sll %g1, 3, %g1 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 + sll %g1, 3, %g1 stb %o5, [%g3 + TI_FPSAVED] rd %gsr, %g2 add %g6, %g1, %g3 @@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 .align 32 80: jmpl %g7 + %g0, %g0 nop - -6: ldub [%g3 + TI_FPSAVED], %o5 - or %o5, FPRS_DU, %o5 - add %g6, TI_FPREGS+0x80, %g2 - stb %o5, [%g3 + TI_FPSAVED] - - sll %g1, 5, %g1 - add %g6, TI_FPREGS+0xc0, %g3 - wr %g0, FPRS_FEF, %fprs - membar #Sync - stda %f32, [%g2 + %g1] ASI_BLK_P - stda %f48, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 80f - nop - - .align 32 -80: jmpl %g7 + %g0, %g0 - nop - - .align 32 -VISenterhalf: - ldub [%g6 + TI_FPDEPTH], %g1 - brnz,a,pn %g1, 1f - cmp %g1, 1 - stb %g0, [%g6 + TI_FPSAVED] - stx %fsr, [%g6 + TI_XFSR] - clr %o5 - jmpl %g7 + %g0, %g0 - wr %g0, FPRS_FEF, %fprs - -1: bne,pn %icc, 2f - srl %g1, 1, %g1 - ba,pt %xcc, vis1 - sub %g7, 8, %g7 -2: addcc %g6, %g1, %g3 - sll %g1, 3, %g1 - andn %o5, FPRS_DU, %g2 - stb %g2, [%g3 + TI_FPSAVED] - - rd %gsr, %g2 - add %g6, %g1, %g3 - stx %g2, [%g3 + TI_GSR] - add %g6, %g1, %g2 - stx %fsr, [%g2 + TI_XFSR] - sll %g1, 5, %g1 -3: andcc %o5, FPRS_DL, %g0 - be,pn %icc, 4f - add %g6, TI_FPREGS, %g2 - - add %g6, TI_FPREGS+0x40, %g3 - membar #Sync - stda %f0, [%g2 + %g1] ASI_BLK_P - stda %f16, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 4f - nop - - .align 32 -4: and %o5, FPRS_DU, %o5 - jmpl %g7 + %g0, %g0 - wr %o5, FPRS_FEF, %fprs diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1d649a9..8069ce1 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page); void VISenter(void); EXPORT_SYMBOL(VISenter); -/* CRYPTO code needs this */ -void VISenterhalf(void); -EXPORT_SYMBOL(VISenterhalf); - extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); -- cgit v0.10.2 From a0a2a6602496a45ae838a96db8b8173794b5d398 Mon Sep 17 00:00:00 2001 From: 
Herbert Xu Date: Tue, 4 Aug 2015 15:42:47 +0800 Subject: net: Fix skb_set_peeked use-after-free bug The commit 738ac1ebb96d02e0d23bc320302a6ea94c612dec ("net: Clone skb before setting peeked flag") introduced a use-after-free bug in skb_recv_datagram. This is because skb_set_peeked may create a new skb and free the existing one. As it stands the caller will continue to use the old freed skb. This patch fixes it by making skb_set_peeked return the new skb (or the old one if unchanged). Fixes: 738ac1ebb96d ("net: Clone skb before setting peeked flag") Reported-by: Brenden Blanco Signed-off-by: Herbert Xu Tested-by: Brenden Blanco Reviewed-by: Konstantin Khlebnikov Signed-off-by: David S. Miller diff --git a/net/core/datagram.c b/net/core/datagram.c index 4967262..617088a 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -131,12 +131,12 @@ out_noerr: goto out; } -static int skb_set_peeked(struct sk_buff *skb) +static struct sk_buff *skb_set_peeked(struct sk_buff *skb) { struct sk_buff *nskb; if (skb->peeked) - return 0; + return skb; /* We have to unshare an skb before modifying it. */ if (!skb_shared(skb)) @@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb) nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); skb->prev->next = nskb; skb->next->prev = nskb; @@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb) done: skb->peeked = 1; - return 0; + return skb; } /** @@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, continue; } - error = skb_set_peeked(skb); - if (error) + skb = skb_set_peeked(skb); + error = PTR_ERR(skb); + if (IS_ERR(skb)) goto unlock_err; atomic_inc(&skb->users); -- cgit v0.10.2 From 57b229063ae6dc65036209018dc7f4290cc026bb Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Tue, 4 Aug 2015 15:40:59 +0100 Subject: xen/netback: Wake dealloc thread after completing zerocopy work Waking the dealloc thread before decrementing inflight_packets is racy because it means the thread may go to sleep before inflight_packets is decremented. If kthread_stop() has already been called, the dealloc thread may wait forever with nothing to wake it. Instead, wake the thread only after decrementing inflight_packets. Signed-off-by: Ross Lagerwall Signed-off-by: David S. Miller diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 1a83e19..28577a3 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) { atomic_dec(&queue->inflight_packets); + + /* Wake the dealloc thread _after_ decrementing inflight_packets so + * that if kthread_stop() has already been called, the dealloc thread + * does not wait forever with nothing to wake it. 
+ */ + wake_up(&queue->dealloc_wq); } int xenvif_schedulable(struct xenvif *vif) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1b406e7..3f44b52 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1541,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) smp_wmb(); queue->dealloc_prod++; } while (ubuf); - wake_up(&queue->dealloc_wq); spin_unlock_irqrestore(&queue->callback_lock, flags); if (likely(zerocopy_success)) -- cgit v0.10.2 From 7ba8bd75ddc6b041b5716dbb29e49df3e9cc2928 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 4 Aug 2015 18:33:34 +0200 Subject: net: pktgen: don't abuse current->state in pktgen_thread_worker() Commit 1fbe4b46caca "net: pktgen: kill the Wait for kthread_stop code in pktgen_thread_worker()" removed (in particular) the final __set_current_state(TASK_RUNNING) and I didn't notice the previous set_current_state(TASK_INTERRUPTIBLE). This triggers the warning in __might_sleep() after return. Afaics, we can simply remove both set_current_state()'s, and we could do this a long ago right after ef87979c273a2 "pktgen: better scheduler friendliness" which changed pktgen_thread_worker() to use wait_event_interruptible_timeout(). Reported-by: Huang Ying Signed-off-by: Oleg Nesterov Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 1ebdf1c..1cbd209 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg) set_freezable(); - __set_current_state(TASK_RUNNING); - while (!kthread_should_stop()) { pkt_dev = next_to_run(t); @@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg) try_to_freeze(); } - set_current_state(TASK_INTERRUPTIBLE); pr_debug("%s stopping all device\n", t->tsk->comm); pktgen_stop(t); -- cgit v0.10.2 From 355b9f9df1f0311f20087350aee8ad96eedca8a9 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 4 Aug 2015 19:06:32 +0200 Subject: bridge: netlink: account for the IFLA_BRPORT_PROXYARP attribute size and policy The attribute size wasn't accounted for in the get_slave_size() callback (br_port_get_slave_size) when it was introduced, so fix it now. Also add a policy entry for it in br_port_policy. Signed-off-by: Nikolay Aleksandrov Fixes: 958501163ddd ("bridge: Add support for IEEE 802.11 Proxy ARP") Signed-off-by: David S. 
Miller diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 3da5525..5390536 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -112,6 +112,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + 0; } @@ -506,6 +507,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ -- cgit v0.10.2 From 786c2077ec8e9eab37a88fc14aac4309a8061e18 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 4 Aug 2015 19:06:33 +0200 Subject: bridge: netlink: account for the IFLA_BRPORT_PROXYARP_WIFI attribute size and policy The attribute size wasn't accounted for in the get_slave_size() callback (br_port_get_slave_size) when it was introduced, so fix it now. Also add a policy entry for it in br_port_policy. Signed-off-by: Nikolay Aleksandrov Fixes: 842a9ae08a25 ("bridge: Extend Proxy ARP design to allow optional rules for Wi-Fi") Signed-off-by: David S. Miller diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5390536..4d74a06 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -113,6 +113,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + 0; } @@ -508,6 +509,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ -- cgit v0.10.2 From 7ebc482202fd54e7cf718619e869f4320b8e3bb6 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 4 Aug 2015 22:11:43 +0200 Subject: r8169: enforce RX_MULTI_EN on rtl8168ep/8111ep chips Enforcing this flag in RxConfig for the mentioned chips fixes netdev watchdog issues prepended with AMD IOMMU message(s) like: AMD-Vi: Event logged [IO_PAGE_FAULT device=01:00.0 domain=0x001d address=0x0000000000003000 flags=0x0050] Note that this flag is also set in Realtek's own driver for these chips. Signed-off-by: Ivan Vecera Tested-by: Alexander Lindqvist Acked-by: Francois Romieu Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3df51fa..f790f61 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); -- cgit v0.10.2 From 22f54bf932a0eed73134b696b238db4bdcff5cd4 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Tue, 4 Aug 2015 20:25:55 +0100 Subject: net: thunderx: remove effective "default y" from Kconfig if ARCH_THUNDER=y As well as for kernels built only for ThunderX ARCH_THUNDERX is also enabled for kernels which support multiple platforms (such as distro kernels). Thus "default ARCH_THUNDER" is inappropriate. I believe default m is equally frowned upon, so remove the line completely rather than "default m if ARCH_THUNDER". Signed-off-by: Ian Campbell Cc: Sunil Goutham Cc: Robert Richter Cc: Derek Chickles Cc: Satanand Burla Cc: Felix Manlunas Cc: Raghu Vatsavayi Cc: Arnd Bergmann Cc: linux-arm-kernel@lists.infradead.org Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index c4d6bbe..02e23e6 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" depends on 64BIT - default ARCH_THUNDER select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. @@ -29,14 +28,12 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. -- cgit v0.10.2 From 866b8b18e380f810ba96e21d25843b841271bb07 Mon Sep 17 00:00:00 2001 From: WingMan Kwok Date: Tue, 4 Aug 2015 16:56:53 -0400 Subject: net: netcp: fix unused interface rx buffer size configuration Prior to this patch, rx buffer size for each rx queue of an interface is configurable through dts bindings. But for an interface, the first rx queue's rx buffer size is always the usual MTU size (plus usual overhead) and page size for the remaining rx queues (if they are enabled by specifying a non-zero rx queue depth dts binding of the corresponding interface). This patch removes the rx buffer size configuration capability. Signed-off-by: WingMan Kwok Acked-by: Murali Karicheri Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index a8a7306..bb1bb72 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -85,7 +85,6 @@ struct netcp_intf { struct list_head rxhook_list_head; unsigned int rx_queue_id; void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; - u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9749dfd..4755838 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -34,6 +34,7 @@ #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) #define NETCP_NAPI_WEIGHT 64 #define NETCP_TX_TIMEOUT (5 * HZ) +#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN #define NETCP_MAX_MCAST_ADDR 16 @@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (likely(fdq == 0)) { unsigned int primary_buf_len; /* Allocate a primary receive queue entry */ - buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; + buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET; primary_buf_len = SKB_DATA_ALIGN(buf_len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (primary_buf_len <= PAGE_SIZE) { - bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = primary_buf_len; - } else { - bufptr = kmalloc(primary_buf_len, GFP_ATOMIC | - GFP_DMA32 | __GFP_COLD); - pad[1] = 0; - } + bufptr = netdev_alloc_frag(primary_buf_len); + pad[1] = primary_buf_len; if (unlikely(!bufptr)) { - dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); + dev_warn_ratelimited(netcp->ndev_dev, + "Primary RX buffer alloc failed\n"); goto fail; } dma = dma_map_single(netcp->dev, bufptr, buf_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(netcp->dev, dma))) + goto fail; + pad[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ - page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); + page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); if (unlikely(!page)) { dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail; @@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) /* Map the linear buffer */ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); - if (unlikely(!dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); return NULL; } @@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) knav_queue_disable_notify(netcp->rx_queue); /* open Rx FDQs */ - for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && - netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i]; + ++i) { snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { @@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, netcp->rx_queue_depths[0] = 128; } - ret = of_property_read_u32_array(node_interface, "rx-buffer-size", - netcp->rx_buffer_sizes, - KNAV_DMA_FDQ_PER_CHAN); - if (ret) { - dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); - netcp->rx_buffer_sizes[0] = 1536; - } - ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); if (ret < 0) { dev_err(dev, "missing \"rx-pool\" parameter\n"); -- cgit v0.10.2 
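A side note on the netcp hunks above: besides removing the rx-buffer-size binding, the patch replaces the "if (unlikely(!dma_addr))" test with dma_mapping_error(), because the generic DMA API does not guarantee that a failed dma_map_single() returns zero. A minimal sketch of that error-check idiom follows; it is illustrative only, and the names example_map_tx_buffer, dev, buf and len are assumed placeholders rather than symbols from the driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: "dev", "buf" and "len" are illustrative placeholders. */
static int example_map_tx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* a failed mapping is not necessarily 0 */

	/* ... hand "dma" to the hardware here, then unmap when done ... */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

The same pattern appears in both netcp hunks above (the fragment allocation and the linear skb buffer); checking dma_mapping_error() is the portable way to detect mapping failure across IOMMU and swiotlb configurations.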
From 4f7eb70f7be1099236670f36b2f1f90a63e29699 Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Tue, 4 Aug 2015 17:25:02 -0700 Subject: stmmac: dwmac-ipq806x: fix static checker warning The patch b1c17215d718: "stmmac: add ipq806x glue layer", leads to the following static checker warning: .../stmmac/dwmac-ipq806x.c:314 ipq806x_gmac_probe() warn: double left shift '1 << (1 << gmac->id)' The NSS_COMMON_CLK_SRC_CTRL_OFFSET macro is used once as an offset, and once as a mask, which is a bug indeed. We'll fix it by defining the offset as the real offset value and computing the mask from it when required. Tested on IPQ806x ref designs AP148 & DB149. Reported-by: Dan Carpenter Signed-off-by: Mathieu Olivari Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 7e3129e..f0e4bb4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -42,7 +42,7 @@ #define NSS_COMMON_CLK_DIV_MASK 0x7f #define NSS_COMMON_CLK_SRC_CTRL 0x14 -#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) +#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x) /* Mode is coded on 1 bit but is different depending on the MAC ID: * MAC0: QSGMII=0 RGMII=1 * MAC1: QSGMII=0 SGMII=0 RGMII=1 @@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) /* Configure the clock src according to the mode */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); - val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id)); switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << -- cgit v0.10.2 From 48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 5 Aug 2015 10:34:04 +0800 Subject: virtio-net: drop NETIF_F_FRAGLIST virtio declares support for NETIF_F_FRAGLIST, but assumes that there are at most MAX_SKB_FRAGS + 2 fragments which isn't always true with a fraglist. A longer fraglist in the skb will make the call to skb_to_sgvec overflow the sg array, leading to memory corruption. Drop NETIF_F_FRAGLIST so we only get what we can handle. Cc: Michael S. Tsirkin Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7fbca37..237f8e5 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev) /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. 
*/ - dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (csum) - dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO -- cgit v0.10.2 From 10971638701dedadb58c88ce4d31c9375b224ed6 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Fri, 7 Aug 2015 13:01:39 +0530 Subject: ARCv2: spinlock/rwlock/atomics: reduce 1 instruction in exponential backoff The increment of the delay counter was 2 instructions: Arithmetic Shift Left (ASL) + set to 1 on overflow. This can be done in 1 instruction using Rotate Left (ROL). Suggested-by: Nigel Topham Cc: Peter Zijlstra (Intel) Cc: linux-kernel@vger.kernel.org Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 629dfd0..87d18ae 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -34,8 +34,7 @@ " mov %[tmp], %[delay] \n" /* tmp = delay */ \ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ - " asl.f %[delay], %[delay], 1 \n" /* delay *= 2 */ \ - " mov.z %[delay], 1 \n" /* handle overflow */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ " b 1b \n" /* start over */ \ "4: ; --- success --- \n" \ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 7071fc0..db8c59d 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -260,8 +260,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) " mov %[tmp], %[delay] \n" /* tmp = delay */ \ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ - " asl.f %[delay], %[delay], 1 \n" /* delay *= 2 */ \ - " mov.z %[delay], 1 \n" /* handle overflow */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ " b 1b \n" /* start over */ \ " \n" \ "4: ; --- done --- \n" \ -- cgit v0.10.2 From 209e4dbc8dcdb2b1839f18fd1cf07ec7bedadf4d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 7 Aug 2015 12:31:17 +0200 Subject: drm/vblank: Use u32 consistently for vblank counters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 99264a61dfcda41d86d0960cf2d4c0fc2758a773 Author: Daniel Vetter Date: Wed Apr 15 19:34:43 2015 +0200 drm/vblank: Fixup and document timestamp update/read barriers I've switched vblank->count from atomic_t to unsigned long and accidentally created an integer comparison bug in drm_vblank_count_and_time since vblank->count might overflow the u32 local copy and hence the retry loop never succeeds. Fix this by consistently using u32.
Cc: Michel Dänzer Reported-by: Michel Dänzer Reviewed-by: Thierry Reding Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f9cc68f..b50fa0a 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600) module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); static void store_vblank(struct drm_device *dev, int crtc, - unsigned vblank_count_inc, + u32 vblank_count_inc, struct timeval *t_vblank) { struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 48db6a5..5aa5197 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -691,7 +691,7 @@ struct drm_vblank_crtc { struct timer_list disable_timer; /* delayed disable timer */ /* vblank counter, protected by dev->vblank_time_lock for writes */ - unsigned long count; + u32 count; /* vblank timestamps, protected by dev->vblank_time_lock for writes */ struct timeval time[DRM_VBLANKTIME_RBSIZE]; -- cgit v0.10.2 From aa0cd28d057fd4cb686fbdd2475a6a3f609dd581 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Fri, 7 Aug 2015 16:33:01 +0100 Subject: dm btree remove: fix bug in remove_one() remove_one() was not incrementing the key for the beginning of the range, so not all entries were being removed. This resulted in discards that were not unmapping all blocks. Fixes: 4ec331c3ea ("dm btree: add dm_btree_remove_leaves()") Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 9836c0a..9ca9ecc 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -689,6 +689,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, value_ptr(n, index)); delete_at(n, index); + keys[last_level] = k + 1ull; } else r = -ENODATA; -- cgit v0.10.2 From bcc84140a62c04f522eacceb793e6eef92965c84 Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Wed, 5 Aug 2015 03:27:48 -0400 Subject: be2net: enable IFACE filters only after creating RXQs HW issues were observed on Lancer adapters if IFACE filters (flags, mac addrs etc) are enabled *before* creating RXQs. This patch changes the driver design by enabling filters in be_open() -- instead of be_setup() -- after RXQs are created and buffers posted. Two new wrapper functions, be_enable_if_filters() and be_disable_if_filters() are introduced to enable/disable IFACE filters in be_open()/be_close() respectively. In be_setup() the IFACE is now created only with the RSS flag. Signed-off-by: Kalesh AP Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 2716e6f..00e3a6b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -620,6 +620,11 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) +#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) + +#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. 
*/ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f64242..7730f21 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) return 0; + /* if device is not running, copy MAC to netdev->dev_addr */ + if (!netif_running(netdev)) + goto done; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. * On BE3, this cmd will always fail if the VF doesn't have the @@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) status = -EPERM; goto err; } - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - dev_info(dev, "MAC address changed to %pM\n", mac); +done: + ether_addr_copy(netdev->dev_addr, addr->sa_data); + dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); @@ -3361,6 +3365,33 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) } } +static void be_disable_if_filters(struct be_adapter *adapter) +{ + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[0], 0); + + be_clear_uc_list(adapter); + + /* The IFACE flags are enabled in the open path and cleared + * in the close path. When a VF gets detached from the host and + * assigned to a VM the following happens: + * - VF's IFACE flags get cleared in the detach path + * - IFACE create is issued by the VF in the attach path + * Due to a bug in the BE3/Skyhawk-R FW + * (Lancer FW doesn't have the bug), the IFACE capability flags + * specified along with the IFACE create cmd issued by a VF are not + * honoured by FW. As a consequence, if a *new* driver + * (that enables/disables IFACE flags in open/close) + * is loaded in the host and an *old* driver is * used by a VM/VF, + * the IFACE gets created *without* the needed flags. + * To avoid this, disable RX-filter flags only for Lancer. 
+ */ + if (lancer_chip(adapter)) { + be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF); + adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS; + } +} + static int be_close(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3373,6 +3404,8 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + be_disable_if_filters(adapter); + be_roce_dev_close(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3392,7 +3425,6 @@ static int be_close(struct net_device *netdev) be_tx_compl_clean(adapter); be_rx_qs_destroy(adapter); - be_clear_uc_list(adapter); for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) @@ -3477,6 +3509,31 @@ static int be_rx_qs_create(struct be_adapter *adapter) return 0; } +static int be_enable_if_filters(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + if (status) + return status; + + /* For BE3 VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr, + adapter->if_handle, + &adapter->pmac_id[0], 0); + if (status) + return status; + } + + if (adapter->vlans_added) + be_vid_config(adapter); + + be_set_rx_mode(adapter->netdev); + + return 0; +} + static int be_open(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3490,6 +3547,10 @@ static int be_open(struct net_device *netdev) if (status) goto err; + status = be_enable_if_filters(adapter); + if (status) + goto err; + status = be_irq_register(adapter); if (status) goto err; @@ -3686,16 +3747,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -static void be_mac_clear(struct be_adapter *adapter) -{ - if (adapter->pmac_id) { - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[0], 0); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - } -} - #ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { @@ -3770,8 +3821,8 @@ static int be_clear(struct be_adapter *adapter) #ifdef CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); #endif - /* delete the primary mac along with the uc-mac list */ - be_mac_clear(adapter); + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; be_cmd_if_destroy(adapter, adapter->if_handle, 0); @@ -3782,25 +3833,11 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static int be_if_create(struct be_adapter *adapter, u32 *if_handle, - u32 cap_flags, u32 vf) -{ - u32 en_flags; - - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - - en_flags &= cap_flags; - - return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); -} - static int be_vfs_if_create(struct be_adapter *adapter) { struct be_resources res = {0}; + u32 cap_flags, en_flags, vf; struct be_vf_cfg *vf_cfg; - u32 cap_flags, vf; int status; /* If a FW profile exists, then cap_flags are updated */ @@ -3821,8 +3858,12 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - status = be_if_create(adapter, &vf_cfg->if_handle, - cap_flags, vf + 1); + en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST | + BE_IF_FLAGS_PASS_L3L4_ERRORS); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) return status; } @@ -4194,15 +4235,8 @@ static int be_mac_setup(struct 
be_adapter *adapter) memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } else { - /* Maybe the HW was reset; dev_addr must be re-programmed */ - memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* For BE3-R VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); return 0; } @@ -4342,6 +4376,7 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 en_flags; int status; status = be_func_init(adapter); @@ -4364,8 +4399,11 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - status = be_if_create(adapter, &adapter->if_handle, - be_if_cap_flags(adapter), 0); + /* will enable all the needed filter flags in be_open() */ + en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + en_flags = en_flags & be_if_cap_flags(adapter); + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); if (status) goto err; @@ -4391,11 +4429,6 @@ static int be_setup(struct be_adapter *adapter) dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } - if (adapter->vlans_added) - be_vid_config(adapter); - - be_set_rx_mode(adapter->netdev); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) -- cgit v0.10.2 From 99b44304f205a826501721d41928e87b0b9cf3b3 Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Wed, 5 Aug 2015 03:27:49 -0400 Subject: be2net: post buffers before destroying RXQs in Lancer An RX stall issue was seen on Lancer adapters, when RXQs are destroyed while they are in an "out of buffer" state. This patch fixes this issue by posting 64 buffers to each RXQ before destroying them in the close path. This is done after ensuring that no more new packets are selected for transfer to the RXQs by disabling interface filters. Signed-off-by: Kalesh AP Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7730f21..14ae67a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2451,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo) be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); } -static void be_rx_cq_clean(struct be_rx_obj *rxo) +/* Free posted rx buffers that were not used */ +static void be_rxq_clean(struct be_rx_obj *rxo) { - struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; + struct be_rx_page_info *page_info; + + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); + rxq->tail = 0; + rxq->head = 0; +} + +static void be_rx_cq_clean(struct be_rx_obj *rxo) +{ struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; @@ -2491,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) /* After cleanup, leave the CQ in unarmed state */ be_cq_notify(adapter, rx_cq->id, false, 0); - - /* Then free posted rx buffers that were not used */ - while (atomic_read(&rxq->used) > 0) { - page_info = get_rx_page_info(rxo); - put_page(page_info->page); - memset(page_info, 0, sizeof(*page_info)); - } - BUG_ON(atomic_read(&rxq->used)); - rxq->tail = 0; - rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -3358,8 +3362,22 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) for_all_rx_queues(adapter, rxo, i) { q = &rxo->q; if (q->created) { + /* If RXQs are destroyed while in an "out of buffer" + * state, there is a possibility of an HW stall on + * Lancer. So, post 64 buffers to each queue to relieve + * the "out of buffer" condition. + * Make sure there's space in the RXQ before posting. + */ + if (lancer_chip(adapter)) { + be_rx_cq_clean(rxo); + if (atomic_read(&q->used) == 0) + be_post_rx_frags(rxo, GFP_KERNEL, + MAX_RX_POST); + } + be_cmd_rxq_destroy(adapter, q); be_rx_cq_clean(rxo); + be_rxq_clean(rxo); } be_queue_free(adapter, q); } -- cgit v0.10.2 From 649886a36b5f023811321819eceaa8ba66444e3b Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Wed, 5 Aug 2015 03:27:50 -0400 Subject: be2net: protect eqo->affinity_mask from getting freed twice There are paths in the driver such as an unrecoverable error (UE) detection followed by a driver unload wherein be_clear() is invoked twice. Individual data structures are reset so that they are not cleaned/freed twice. This patch does the same for eqo->affinity_mask. It is freed only if EQs haven't yet been destroyed. This fixes a possible crash when affinity_mask is freed twice. Signed-off-by: Kalesh AP Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 14ae67a..c28e3bf 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2584,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); + free_cpumask_var(eqo->affinity_mask); } - free_cpumask_var(eqo->affinity_mask); be_queue_free(adapter, &eqo->q); } } @@ -2602,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); - if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) - return -ENOMEM; - cpumask_set_cpu(cpumask_local_spread(i, numa_node), - eqo->affinity_mask); - netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->idx = i; @@ -2624,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter) rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; + + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); } return 0; } -- cgit v0.10.2 From 6b30c73e9f37183ad60c7f7050acf8e8edf91e9c Mon Sep 17 00:00:00 2001 From: Duson Lin Date: Fri, 7 Aug 2015 14:37:24 -0700 Subject: Input: elantech - add special check for fw_version 0x470f01 touchpad It is no need to check the packet[0] for sanity check when doing elantech_packet_check_v4() function for fw_version = 0x470f01 touchpad. Signed-off by: Duson Lin Reviewed-by: Ulrik De Bie Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 22b9ca9..2955f1d 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; unsigned char packet_type = packet[3] & 0x03; + unsigned int ic_version; bool sanity_check; if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) return PACKET_TRACKPOINT; + /* This represents the version of IC body. */ + ic_version = (etd->fw_version & 0x0f0000) >> 16; + /* * Sanity check based on the constant bits of a packet. * The constant bits change depending on the value of - * the hardware flag 'crc_enabled' but are the same for - * every packet, regardless of the type. + * the hardware flag 'crc_enabled' and the version of + * the IC body, but are the same for every packet, + * regardless of the type. */ if (etd->crc_enabled) sanity_check = ((packet[3] & 0x08) == 0x00); + else if (ic_version == 7 && etd->samples[1] == 0x2A) + sanity_check = ((packet[3] & 0x1c) == 0x10); else sanity_check = ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0x1c) == 0x10); @@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Avatar AVIU-145A2 0x361f00 ? 
clickpad * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) @@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse) etd->capabilities[0], etd->capabilities[1], etd->capabilities[2]); + if (etd->hw_version != 1) { + if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) { + psmouse_err(psmouse, "failed to query sample data\n"); + goto init_fail; + } + psmouse_info(psmouse, + "Elan sample query result %02x, %02x, %02x\n", + etd->samples[0], etd->samples[1], etd->samples[2]); + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index f965d15..e1cbf40 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -129,6 +129,7 @@ struct elantech_data { unsigned char reg_26; unsigned char debug; unsigned char capabilities[3]; + unsigned char samples[3]; bool paritycheck; bool jumpy_cursor; bool reports_pressure; -- cgit v0.10.2 From fe1e1876d8f6d8d4b45e3940e6dd43cd3b18d958 Mon Sep 17 00:00:00 2001 From: Carol L Soto Date: Wed, 5 Aug 2015 11:05:32 -0500 Subject: net/mlx5_core: Set log_uar_page_sz for non 4K page size architecture failed to configure the page size for architectures with page size different than 4K. Fixes: 938fe83 ("net/mlx5_core: New device capabilities handling") Signed-off-by: Carol L Soto Acked-by: Amir Vadai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index afad529..06e3e1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + err = set_caps(dev, set_ctx, set_sz); query_ex: -- cgit v0.10.2 From f7644cbfcdf03528f0f450f3940c4985b2291f49 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 9 Aug 2015 15:54:30 -0400 Subject: Linux 4.2-rc6 diff --git a/Makefile b/Makefile index 15d3201..35b4c19 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* -- cgit v0.10.2 From da2e5ae56164b86823c1bff5b4d28430ca4a7108 Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Mon, 13 Jul 2015 08:07:08 -0400 Subject: NTB: Fix ntb_transport out-of-order RX update It was possible for a synchronous update of the RX index in the error case to get ahead of the asynchronous RX index update in the normal case. Change the RX processing to preserve an RX completion order. There were two error cases. First, if a buffer is not present to receive data, there would be no queue entry to preserve the RX completion order. Instead of dropping the RX frame, leave the RX frame in the ring. Schedule RX processing when RX entries are enqueued, in case there are RX frames waiting in the ring to be received. Second, if a buffer is too small to receive data, drop the frame in the ring, mark the RX entry as done, and indicate the error in the RX entry length. 
Check for a negative length in the receive callback in ntb_netdev, and count occurrences as rx_length_errors. Signed-off-by: Allen Hubbe Signed-off-by: Jon Mason diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 3cc316cb..5f1ee7c 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); + if (len < 0) { + ndev->stats.rx_errors++; + ndev->stats.rx_length_errors++; + goto enqueue_again; + } + skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; @@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, return; } +enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index efe3ad4..98e58c7 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -142,10 +142,11 @@ struct ntb_transport_qp { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); + struct list_head rx_post_q; struct list_head rx_pend_q; struct list_head rx_free_q; - spinlock_t ntb_rx_pend_q_lock; - spinlock_t ntb_rx_free_q_lock; + /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ + spinlock_t ntb_rx_q_lock; void *rx_buff; unsigned int rx_index; unsigned int rx_max_entry; @@ -534,6 +535,27 @@ out: return entry; } +static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, + struct list_head *list, + struct list_head *to_list) +{ + struct ntb_queue_entry *entry; + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + if (list_empty(list)) { + entry = NULL; + } else { + entry = list_first_entry(list, struct ntb_queue_entry, entry); + list_move_tail(&entry->entry, to_list); + } + + spin_unlock_irqrestore(lock, flags); + + return entry; +} + static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { @@ -941,10 +963,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); - spin_lock_init(&qp->ntb_rx_pend_q_lock); - spin_lock_init(&qp->ntb_rx_free_q_lock); + spin_lock_init(&qp->ntb_rx_q_lock); spin_lock_init(&qp->ntb_tx_free_q_lock); + INIT_LIST_HEAD(&qp->rx_post_q); INIT_LIST_HEAD(&qp->rx_pend_q); INIT_LIST_HEAD(&qp->rx_free_q); INIT_LIST_HEAD(&qp->tx_free_q); @@ -1107,22 +1129,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev) kfree(nt); } -static void ntb_rx_copy_callback(void *data) +static void ntb_complete_rxc(struct ntb_transport_qp *qp) { - struct ntb_queue_entry *entry = data; - struct ntb_transport_qp *qp = entry->qp; - void *cb_data = entry->cb_data; - unsigned int len = entry->len; - struct ntb_payload_header *hdr = entry->rx_hdr; + struct ntb_queue_entry *entry; + void *cb_data; + unsigned int len; + unsigned long irqflags; + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + + while (!list_empty(&qp->rx_post_q)) { + entry = list_first_entry(&qp->rx_post_q, + struct ntb_queue_entry, entry); + if (!(entry->flags & DESC_DONE_FLAG)) + break; + + entry->rx_hdr->flags = 0; + iowrite32(entry->index, &qp->rx_info->entry); + + cb_data = entry->cb_data; + len = entry->len; + + list_move_tail(&entry->entry, &qp->rx_free_q); - hdr->flags = 0; + 
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); - iowrite32(entry->index, &qp->rx_info->entry); + if (qp->rx_handler && qp->client_ready) + qp->rx_handler(qp, qp->cb_data, cb_data, len); - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + } - if (qp->rx_handler && qp->client_ready) - qp->rx_handler(qp, qp->cb_data, cb_data, len); + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); +} + +static void ntb_rx_copy_callback(void *data) +{ + struct ntb_queue_entry *entry = data; + + entry->flags |= DESC_DONE_FLAG; + + ntb_complete_rxc(entry->qp); } static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) @@ -1138,19 +1185,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) ntb_rx_copy_callback(entry); } -static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, - size_t len) +static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) { struct dma_async_tx_descriptor *txd; struct ntb_transport_qp *qp = entry->qp; struct dma_chan *chan = qp->dma_chan; struct dma_device *device; - size_t pay_off, buff_off; + size_t pay_off, buff_off, len; struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - entry->len = len; + len = entry->len; if (!chan) goto err; @@ -1226,7 +1272,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; void *offset; - int rc; offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); @@ -1255,65 +1300,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) return -EIO; } - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (!entry) { dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); qp->rx_err_no_buf++; - - rc = -ENOMEM; - goto err; + return -EAGAIN; } + entry->rx_hdr = hdr; + entry->index = qp->rx_index; + if (hdr->len > entry->len) { dev_dbg(&qp->ndev->pdev->dev, "receive buffer overflow! Wanted %d got %d\n", hdr->len, entry->len); qp->rx_err_oflow++; - rc = -EIO; - goto err; - } + entry->len = -EIO; + entry->flags |= DESC_DONE_FLAG; - dev_dbg(&qp->ndev->pdev->dev, - "RX OK index %u ver %u size %d into buf size %d\n", - qp->rx_index, hdr->ver, hdr->len, entry->len); + ntb_complete_rxc(qp); + } else { + dev_dbg(&qp->ndev->pdev->dev, + "RX OK index %u ver %u size %d into buf size %d\n", + qp->rx_index, hdr->ver, hdr->len, entry->len); - qp->rx_bytes += hdr->len; - qp->rx_pkts++; + qp->rx_bytes += hdr->len; + qp->rx_pkts++; - entry->index = qp->rx_index; - entry->rx_hdr = hdr; + entry->len = hdr->len; - ntb_async_rx(entry, offset, hdr->len); + ntb_async_rx(entry, offset); + } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return 0; - -err: - /* FIXME: if this syncrhonous update of the rx_index gets ahead of - * asyncrhonous ntb_rx_copy_callback of previous entry, there are three - * scenarios: - * - * 1) The peer might miss this update, but observe the update - * from the memcpy completion callback. In this case, the buffer will - * not be freed on the peer to be reused for a different packet. The - * successful rx of a later packet would clear the condition, but the - * condition could persist if several rx fail in a row. - * - * 2) The peer may observe this update before the asyncrhonous copy of - * prior packets is completed. 
The peer may overwrite the buffers of - * the prior packets before they are copied. - * - * 3) Both: the peer may observe the update, and then observe the index - * decrement by the asynchronous completion callback. Who knows what - * badness that will cause. - */ - hdr->flags = 0; - iowrite32(qp->rx_index, &qp->rx_info->entry); - - return rc; } static void ntb_transport_rxc_db(unsigned long data) @@ -1333,7 +1356,7 @@ static void ntb_transport_rxc_db(unsigned long data) break; } - if (qp->dma_chan) + if (i && qp->dma_chan) dma_async_issue_pending(qp->dma_chan); if (i == qp->rx_max_entry) { @@ -1609,7 +1632,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, goto err1; entry->qp = qp; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); } @@ -1634,7 +1657,7 @@ err2: while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); err1: - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); if (qp->dma_chan) dma_release_channel(qp->dma_chan); @@ -1689,11 +1712,16 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) qp->tx_handler = NULL; qp->event_handler = NULL; - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); - while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { - dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); + kfree(entry); + } + + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); kfree(entry); } @@ -1724,14 +1752,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) if (!qp || qp->client_ready) return NULL; - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); if (!entry) return NULL; buf = entry->cb_data; *len = entry->len; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); return buf; } @@ -1757,15 +1785,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, if (!qp) return -EINVAL; - entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); if (!entry) return -ENOMEM; entry->cb_data = cb; entry->buf = data; entry->len = len; + entry->flags = 0; + + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); - ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); + tasklet_schedule(&qp->rxc_db_work); return 0; } -- cgit v0.10.2 From c8650fd03d320e9c39f44435a583933cacea5259 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 13 Jul 2015 08:07:09 -0400 Subject: NTB: Fix transport stats for multiple devices Currently the debugfs does not have files for all NTB transport queue pairs. When there are multiple NTBs present in a system, the QP names of the last transport clobber the names of previously added transport QPs. Only the last added QPs can be observed via debugfs. Create a directory per NTB transport to associate the QPs with that transport. Name the directory the same as the PCI device. 
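The layout this produces nests every queue pair under a per-transport directory named after the PCI device, e.g. ntb_transport/0000:02:00.0/qp0/stats (path shown for illustration only; the root directory name depends on the module name). A minimal sketch of the call shape, condensed from the change below:

	struct dentry *dev_dir, *qp_dir;
	char qp_name[8];

	/* once per transport, under the module's debugfs root */
	dev_dir = debugfs_create_dir(pci_name(ndev->pdev), nt_debugfs_dir);

	/* once per queue pair, under that transport's directory */
	snprintf(qp_name, sizeof(qp_name), "qp%d", qp_num);
	qp_dir = debugfs_create_dir(qp_name, dev_dir);
	debugfs_create_file("stats", S_IRUSR, qp_dir, qp, &stats_fops);

Here stats_fops stands in for the driver's existing stats file_operations; the sketch only shows how the per-device directory keeps QP names from clobbering each other.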
Signed-off-by: Dave Jiang Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 98e58c7..25e973f 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -212,6 +212,8 @@ struct ntb_transport_ctx { bool link_is_up; struct delayed_work link_work; struct work_struct link_cleanup; + + struct dentry *debugfs_node_dir; }; enum { @@ -945,12 +947,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - if (nt_debugfs_dir) { + if (nt->debugfs_node_dir) { char debugfs_name[4]; snprintf(debugfs_name, 4, "qp%d", qp_num); qp->debugfs_dir = debugfs_create_dir(debugfs_name, - nt_debugfs_dir); + nt->debugfs_node_dir); qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, qp->debugfs_dir, qp, @@ -1053,6 +1055,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) goto err2; } + if (nt_debugfs_dir) { + nt->debugfs_node_dir = + debugfs_create_dir(pci_name(ndev->pdev), + nt_debugfs_dir); + } + for (i = 0; i < qp_count; i++) { rc = ntb_transport_init_queue(nt, i); if (rc) -- cgit v0.10.2 From da4eb27a2c2efd034bdd645650114b82c479329c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 13 Jul 2015 08:07:10 -0400 Subject: NTB: ntb_netdev not covering all receive errors ntb_netdev is allowing the link to come up even when -ENOMEM is returned from ntb_transport_rx_enqueue. Fix to cover all possible errors. Signed-off-by: Dave Jiang Signed-off-by: Jon Mason diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 5f1ee7c..d8757bf 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -191,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev) rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ndev->mtu + ETH_HLEN); - if (rc == -EINVAL) { + if (rc) { dev_kfree_skb(skb); goto err; } -- cgit v0.10.2 From 260bee9451b4f0f5f9845c5b3024f0bfb8de8f22 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 13 Jul 2015 08:07:11 -0400 Subject: NTB: Fix oops in debugfs when transport is half-up When the remote side is not up, we do not have all the context for the transport, and that causes NULL ptr access. Have the debugfs reads check to see if transport is up before we make access. Signed-off-by: Dave Jiang Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 25e973f..a049f96 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -439,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, char *buf; ssize_t ret, out_offset, out_count; + qp = filp->private_data; + + if (!qp || !qp->link_is_up) + return 0; + out_count = 1000; buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; - qp = filp->private_data; out_offset = 0; out_offset += snprintf(buf + out_offset, out_count - out_offset, "NTB QP stats\n"); -- cgit v0.10.2 From 8b5a22d8f18496f5921ccb92554a7051cbfd9b0c Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Mon, 13 Jul 2015 08:07:12 -0400 Subject: NTB: Schedule to receive on QP link up Schedule to receive on QP link up, to make sure that the doorbell is properly cleared for interrupts. 
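A minimal sketch of the mechanism, using the driver's existing receive tasklet (the init call is shown only for context; the schedule call is what the change below adds):

	/* set up when the queue pair is initialized */
	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, (unsigned long)qp);

	/* on link up: kick the receive path once so any doorbell that was set
	 * before the link came up is processed and cleared
	 */
	tasklet_schedule(&qp->rxc_db_work);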
Signed-off-by: Allen Hubbe Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index a049f96..b82171e 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -895,6 +895,8 @@ static void ntb_qp_link_work(struct work_struct *work) if (qp->event_handler) qp->event_handler(qp->cb_data, qp->link_is_up); + + tasklet_schedule(&qp->rxc_db_work); } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); -- cgit v0.10.2 From 8c9edf63e75f036b42afb4502deb20bbfb5004b4 Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Mon, 13 Jul 2015 08:07:13 -0400 Subject: NTB: Fix zero size or integer overflow in ntb_set_mw A plain 32 bit integer will overflow for values over 4GiB. Change the plain integer size to the appropriate size type in ntb_set_mw. Change the type of the size parameter and two local variables used for size. Even if there is no overflow, a size of zero is invalid here. Reported-by: Juyoung Jung Signed-off-by: Allen Hubbe Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index b82171e..bc556e2 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -629,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, - unsigned int size) + resource_size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct pci_dev *pdev = nt->ndev->pdev; - unsigned int xlat_size, buff_size; + size_t xlat_size, buff_size; int rc; + if (!size) + return -EINVAL; + xlat_size = round_up(size, mw->xlat_align_size); buff_size = round_up(size, mw->xlat_align); @@ -655,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, if (!mw->virt_addr) { mw->xlat_size = 0; mw->buff_size = 0; - dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", + dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n", buff_size); return -ENOMEM; } -- cgit v0.10.2 From 30a4bb1e5a9d7e283af6e29da09362104b67d7aa Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Mon, 13 Jul 2015 08:07:14 -0400 Subject: NTB: Fix dereference before check Remove early dereference of a pointer that is checked later in the code. Reported-by: Dan Carpenter Signed-off-by: Allen Hubbe Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index bc556e2..1c6386d 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1692,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct ntb_transport_ctx *nt = qp->transport; struct pci_dev *pdev; struct ntb_queue_entry *entry; u64 qp_bit; @@ -1745,7 +1744,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); - nt->qp_bitmap_free |= qp_bit; + qp->transport->qp_bitmap_free |= qp_bit; dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); } -- cgit v0.10.2 From e15f940908e474da03349cb55a107fe89310a02c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 24 Jul 2015 16:35:59 -0700 Subject: ntb: avoid format string in dev_set_name Avoid any chance of format string expansion when calling dev_set_name. 
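dev_set_name() takes a printf-style format string, so the hazard is that a '%' in a name passed as the format itself would be interpreted as a conversion specifier. A short sketch of the unsafe and safe forms (illustrative only; PCI names do not normally contain '%', the change simply removes the possibility):

	const char *name = pci_name(ntb->pdev);  /* arbitrary text as far as dev_set_name() is concerned */

	dev_set_name(&ntb->dev, name);        /* unsafe: name is used as the format */
	dev_set_name(&ntb->dev, "%s", name);  /* safe: name is only ever data */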
Signed-off-by: Kees Cook Signed-off-by: Jon Mason diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 23435f2..2e25307 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb) ntb->dev.bus = &ntb_bus; ntb->dev.parent = &ntb->pdev->dev; ntb->dev.release = ntb_dev_release; - dev_set_name(&ntb->dev, pci_name(ntb->pdev)); + dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev)); ntb->ctx = NULL; ntb->ctx_ops = NULL; -- cgit v0.10.2 From 2701fa0864ecb9e49d47a4aa1c02f172ab79639a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 28 Jul 2015 15:31:12 +0200 Subject: fbdev: select versatile helpers for the integrator Commit 11c32d7b6274cb0f554943d65bd4a126c4a86dcd "video: move Versatile CLCD helpers" missed the fact that the Integrator/CP is also using the helper, and as a result the platform got only stubs and no graphics. Add this as a default selection to Kconfig so we have graphics again. Fixes: 11c32d7b6274 (video: move Versatile CLCD helpers) Signed-off-by: Linus Walleij Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 2d98de5..f888561 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -298,7 +298,7 @@ config FB_ARMCLCD # Helper logic selected only by the ARM Versatile platform family. config PLAT_VERSATILE_CLCD - def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS + def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR depends on ARM depends on FB_ARMCLCD && FB=y -- cgit v0.10.2 From 2b55cb3b04684f131a01faf2eb8e4d822d293f24 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Fri, 7 Aug 2015 14:04:29 +0300 Subject: OMAPDSS: Fix node refcount leak in omapdss_of_get_next_port() Fix node refcount leak in omapdss_of_get_next_port(). Signed-off-by: Jyri Sarha Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c index 928ee63..c8c065d 100644 --- a/drivers/video/fbdev/omap2/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/dss/dss-of.c @@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent, } prev = port; } while (of_node_cmp(port->name, "port") != 0); + + of_node_put(ports); } return port; -- cgit v0.10.2 From 6266f4b19d341c531d447d689fb44609daa06c79 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Fri, 7 Aug 2015 14:04:30 +0300 Subject: OMAPDSS: Fix omap_dss_find_output_by_port_node() port refcount decrement Fix omap_dss_find_output_by_port_node() port parameter refcount decrementation. The only user of dss_of_port_get_parent_device() function is omap_dss_find_output_by_port_node() and it assumes the refcount of the port parameter is not decremented by the call. Signed-off-by: Jyri Sarha Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c index c8c065d..bf407b6 100644 --- a/drivers/video/fbdev/omap2/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/dss/dss-of.c @@ -96,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port) if (!port) return NULL; - np = of_get_next_parent(port); + np = of_get_parent(port); for (i = 0; i < 2 && np; ++i) { struct property *prop; -- cgit v0.10.2 From 9e6e35edb330619fbfa3457eff1d15d3672c833a Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 3 Aug 2015 22:15:34 +0200 Subject: video: fbdev: pxa3xx_gcu: prepare the clocks The clocks need to be prepared before being enabled. 
Without it a warning appears in the drivers probe path : WARNING: CPU: 0 PID: 1 at drivers/clk/clk.c:707 clk_core_enable+0x84/0xa0() Modules linked in: CPU: 0 PID: 1 Comm: swapper Not tainted 4.2.0-rc3-cm-x300+ #804 Hardware name: CM-X300 module [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (warn_slowpath_common+0x7c/0xb4) [] (warn_slowpath_common) from [] (warn_slowpath_null+0x1c/0x24) [] (warn_slowpath_null) from [] (clk_core_enable+0x84/0xa0) [] (clk_core_enable) from [] (clk_enable+0x20/0x34) [] (clk_enable) from [] (pxa3xx_gcu_probe+0x148/0x338) [] (pxa3xx_gcu_probe) from [] (platform_drv_probe+0x30/0x94) Signed-off-by: Robert Jarzmik Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 86bd457..50bce45 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) goto err_free_dma; } - ret = clk_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); if (ret < 0) { dev_err(dev, "failed to enable clock\n"); goto err_misc_deregister; @@ -685,7 +685,7 @@ err_misc_deregister: misc_deregister(&priv->misc_dev); err_disable_clk: - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } -- cgit v0.10.2 From 96fffb4f23f124f297d51dedc9cf51d19eb88ee1 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Sun, 9 Aug 2015 13:14:15 +0200 Subject: netfilter: ip6t_SYNPROXY: fix NULL pointer dereference This happens when networking namespaces are enabled. Suggested-by: Patrick McHardy Signed-off-by: Phil Sutter Acked-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 6edb7b1..bcebc24 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, } static void -synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, +synproxy_send_tcp(const struct synproxy_net *snet, + const struct sk_buff *skb, struct sk_buff *nskb, struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, struct ipv6hdr *niph, struct tcphdr *nth, unsigned int tcp_hdr_size) { - struct net *net = nf_ct_net((struct nf_conn *)nfct); + struct net *net = nf_ct_net(snet->tmpl); struct dst_entry *dst; struct flowi6 fl6; @@ -83,7 +84,8 @@ free_nskb: } static void -synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, +synproxy_send_client_synack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; @@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } @@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, niph, nth, tcp_hdr_size); } @@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, 
tcp_hdr_size); } static void @@ -241,7 +243,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); } static bool @@ -301,7 +303,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); - synproxy_send_client_synack(skb, th, &opts); + synproxy_send_client_synack(snet, skb, th, &opts); return NF_DROP; } else if (th->ack && !(th->fin || th->rst || th->syn)) { -- cgit v0.10.2 From 3c16241c445303a90529565e7437e1f240acfef2 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Tue, 28 Jul 2015 00:53:26 +0200 Subject: netfilter: SYNPROXY: fix sending window update to client Upon receipt of SYNACK from the server, ipt_SYNPROXY first sends back an ACK to finish the server handshake, then calls nf_ct_seqadj_init() to initiate sequence number adjustment of forwarded packets to the client and finally sends a window update to the client to unblock it's TX queue. Since synproxy_send_client_ack() does not set synproxy_send_tcp()'s nfct parameter, no sequence number adjustment happens and the client receives the window update with incorrect sequence number. Depending on client TCP implementation, this leads to a significant delay (until a window probe is being sent). Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index fe8cc18..95ea633e 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index bcebc24..ebbb754 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -243,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool -- cgit v0.10.2 From 37b617f9be527b1f1181e3ff23df4cdbe3e6441f Mon Sep 17 00:00:00 2001 From: Christian Engelmayer Date: Sat, 11 Jul 2015 19:46:11 +0200 Subject: video: Fix possible leak in of_get_videomode() In case videomode_from_timings() fails in function of_get_videomode(), the allocated display timing data is not freed in the exit path. Make sure that display_timings_release() is called in any case. Detected by Coverity CID 1309681. 
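The fix moves the function to a single release point so the timing data is freed on both the success and the error path. A simplified sketch of the resulting shape (not the literal function body):

	disp = of_get_display_timings(np);
	if (!disp)
		return -EINVAL;

	if (index == OF_USE_NATIVE_MODE)
		index = disp->native_mode;

	ret = videomode_from_timings(disp, vm, index);

	display_timings_release(disp);  /* released regardless of ret */
	return ret;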
Signed-off-by: Christian Engelmayer Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c index 111c2d1..b5102aa 100644 --- a/drivers/video/of_videomode.c +++ b/drivers/video/of_videomode.c @@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm, index = disp->native_mode; ret = videomode_from_timings(disp, vm, index); - if (ret) - return ret; display_timings_release(disp); - return 0; + return ret; } EXPORT_SYMBOL_GPL(of_get_videomode); -- cgit v0.10.2 From 2a17d7e80f1df44d6b94c3696d5eda44fe6638a8 Mon Sep 17 00:00:00 2001 From: Scot Doyle Date: Tue, 4 Aug 2015 12:33:32 +0000 Subject: fbcon: unconditionally initialize cursor blink interval A sun7i-a20-olinuxino-micro fails to boot when kernel parameter vt.global_cursor_default=0. The value is copied to vc->vc_deccm causing the initialization of ops->cur_blink_jiffies to be skipped. Unconditionally initialize it. Reported-and-tested-by: Jonathan Liu Signed-off-by: Scot Doyle Acked-by: Pavel Machek Signed-off-by: Tomi Valkeinen diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 658c34b..1aaf893 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode) int y; int c = scr_readw((u16 *) vc->vc_pos); + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) return; - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); if (vc->vc_cursor_type & 0x10) fbcon_del_cursor_timer(info); else -- cgit v0.10.2 From fc5fee86bdd3d720e2d1d324e4fae0c35845fa63 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 10 Aug 2015 15:40:27 +0200 Subject: x86/xen: build "Xen PV" APIC driver for domU as well It turns out that a PV domU also requires the "Xen PV" APIC driver. Otherwise, the flat driver is used and we get stuck in busy loops that never exit, such as in this stack trace: (gdb) target remote localhost:9999 Remote debugging using localhost:9999 __xapic_wait_icr_idle () at ./arch/x86/include/asm/ipi.h:56 56 while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) (gdb) bt #0 __xapic_wait_icr_idle () at ./arch/x86/include/asm/ipi.h:56 #1 __default_send_IPI_shortcut (shortcut=, dest=, vector=) at ./arch/x86/include/asm/ipi.h:75 #2 apic_send_IPI_self (vector=246) at arch/x86/kernel/apic/probe_64.c:54 #3 0xffffffff81011336 in arch_irq_work_raise () at arch/x86/kernel/irq_work.c:47 #4 0xffffffff8114990c in irq_work_queue (work=0xffff88000fc0e400) at kernel/irq_work.c:100 #5 0xffffffff8110c29d in wake_up_klogd () at kernel/printk/printk.c:2633 #6 0xffffffff8110ca60 in vprintk_emit (facility=0, level=, dict=0x0 , dictlen=, fmt=, args=) at kernel/printk/printk.c:1778 #7 0xffffffff816010c8 in printk (fmt=) at kernel/printk/printk.c:1868 #8 0xffffffffc00013ea in ?? () #9 0x0000000000000000 in ?? () Mailing-list-thread: https://lkml.org/lkml/2015/8/4/755 Signed-off-by: Jason A. 
Donenfeld Cc: Signed-off-by: David Vrabel diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 7322755..4b6e29a 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp) obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ grant-table.o suspend.o platform-pci-unplug.o \ - p2m.o + p2m.o apic.o obj-$(CONFIG_EVENT_TRACING) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_DOM0) += apic.o vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9e195c6..bef30cb 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -101,17 +101,15 @@ struct dom0_vga_console_info; #ifdef CONFIG_XEN_DOM0 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); -void __init xen_init_apic(void); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { } -static inline void __init xen_init_apic(void) -{ -} #endif +void __init xen_init_apic(void); + #ifdef CONFIG_XEN_EFI extern void xen_efi_init(void); #else -- cgit v0.10.2 From 878854a374620a3f5e8c0a3c418e82a429bc2cff Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 7 Aug 2015 21:03:23 -0500 Subject: arm64: VDSO: fix coarse clock monotonicity regression Since 906c55579a63 ("timekeeping: Copy the shadow-timekeeper over the real timekeeper last") it has become possible on arm64 to: - Obtain a CLOCK_MONOTONIC_COARSE or CLOCK_REALTIME_COARSE timestamp via syscall. - Subsequently obtain a timestamp for the same clock ID via VDSO which predates the first timestamp (by one jiffy). This is because arm64's update_vsyscall is deriving the coarse time using the __current_kernel_time interface, when it should really be using the timekeeper object provided to it by the timekeeping core. It happened to work before only because __current_kernel_time would access the same timekeeper object which had been passed to update_vsyscall. This is no longer the case. Signed-off-by: Nathan Lynch Acked-by: Will Deacon Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index ec37ab3..97bc68f 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -199,16 +199,15 @@ up_fail: */ void update_vsyscall(struct timekeeper *tk) { - struct timespec xtime_coarse; u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); - xtime_coarse = __current_kernel_time(); vdso_data->use_syscall = use_syscall; - vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; - vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->xtime_coarse_sec = tk->xtime_sec; + vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> + tk->tkr_mono.shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; -- cgit v0.10.2 From d53793c5d6eb0cfe4175d9c315a30d65adf3478b Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 6 Aug 2015 19:00:28 +0200 Subject: net: mvpp2: remove excessive spinlocks from driver initialization Using spinlocks protection during one-time driver initialization is not necessary. Moreover it resulted in invalid GFP_KERNEL allocation under the lock. 
This commit removes redundant spinlocks from buffer manager part of mvpp2 initialization. Signed-off-by: Marcin Wojtas Reported-by: Alexandre Fournier Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 3e8b1bf..f94bd12 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -913,8 +913,6 @@ struct mvpp2_bm_pool { /* Occupied buffers indicator */ atomic_t in_use; int in_use_thresh; - - spinlock_t lock; }; struct mvpp2_buff_hdr { @@ -3376,7 +3374,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->pkt_size = 0; bm_pool->buf_num = 0; atomic_set(&bm_pool->in_use, 0); - spin_lock_init(&bm_pool->lock); return 0; } @@ -3647,7 +3644,6 @@ static struct mvpp2_bm_pool * mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, int pkt_size) { - unsigned long flags = 0; struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; @@ -3656,8 +3652,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, return NULL; } - spin_lock_irqsave(&new_pool->lock, flags); - if (new_pool->type == MVPP2_BM_FREE) new_pool->type = type; @@ -3686,8 +3680,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, pkts_num); - /* We need to undo the bufs_add() allocations */ - spin_unlock_irqrestore(&new_pool->lock, flags); return NULL; } } @@ -3695,15 +3687,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - spin_unlock_irqrestore(&new_pool->lock, flags); - return new_pool; } /* Initialize pools for swf */ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { - unsigned long flags = 0; int rxq; if (!port->pool_long) { @@ -3714,9 +3703,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_long) return -ENOMEM; - spin_lock_irqsave(&port->pool_long->lock, flags); port->pool_long->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_long->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); @@ -3730,9 +3717,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_short) return -ENOMEM; - spin_lock_irqsave(&port->pool_short->lock, flags); port->pool_short->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_short->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_short_pool_set(port, rxq, -- cgit v0.10.2 From 71ce391dfb7843f4d31abcd7287967a00cb1b8a1 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 6 Aug 2015 19:00:29 +0200 Subject: net: mvpp2: enable proper per-CPU TX buffers unmapping mvpp2 driver allows usage of per-CPU TX processing. Once the packets are prepared independetly on each CPU, the hardware enqueues the descriptors in common TX queue. After they are sent, the buffers and associated sk_buffs should be released on the corresponding CPU. This is why a special index is maintained in order to point to the right data to be released after transmission takes place. Each per-CPU TX queue comprise an array of sent sk_buffs, freed in mvpp2_txq_bufs_free function. 
However, the index was used there also for obtaining a descriptor (and therefore a buffer to be DMA-unmapped) from common TX queue, which was wrong, because it was not referring to the current CPU. This commit enables proper unmapping of sent data buffers by indexing them in per-CPU queues using a dedicated array for keeping their physical addresses. Signed-off-by: Marcin Wojtas Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index f94bd12..3e25d314 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -776,6 +776,9 @@ struct mvpp2_txq_pcpu { /* Array of transmitted skb */ struct sk_buff **tx_skb; + /* Array of transmitted buffers' physical addresses */ + dma_addr_t *tx_buffs; + /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -961,9 +964,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) } static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb) + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) { txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; + if (skb) + txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = + tx_desc->buf_phys_addr; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -4392,8 +4399,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, int i; for (i = 0; i < num; i++) { - struct mvpp2_tx_desc *tx_desc = txq->descs + - txq_pcpu->txq_get_index; + dma_addr_t buf_phys_addr = + txq_pcpu->tx_buffs[txq_pcpu->txq_get_index]; struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; mvpp2_txq_inc_get(txq_pcpu); @@ -4401,8 +4408,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, if (!skb) continue; - dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, - tx_desc->data_size, DMA_TO_DEVICE); + dma_unmap_single(port->dev->dev.parent, buf_phys_addr, + skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } @@ -4634,12 +4641,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * sizeof(*txq_pcpu->tx_skb), GFP_KERNEL); - if (!txq_pcpu->tx_skb) { - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); - return -ENOMEM; - } + if (!txq_pcpu->tx_skb) + goto error; + + txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size * + sizeof(dma_addr_t), GFP_KERNEL); + if (!txq_pcpu->tx_buffs) + goto error; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -4648,6 +4656,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port, } return 0; + +error: + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); + } + + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + return -ENOMEM; } /* Free allocated TXQ resources */ @@ -4660,6 +4681,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); } if (txq->descs) @@ -5129,11 +5151,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ tx_desc->command = MVPP2_TXD_L_DESC; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - 
mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); } } @@ -5199,12 +5221,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { -- cgit v0.10.2 From edc660fa09e2295b6ee2d2bf742c2a72dfeb18d2 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 6 Aug 2015 19:00:30 +0200 Subject: net: mvpp2: replace TX coalescing interrupts with hrtimer The PP2 controller is capable of per-CPU TX processing, which means there are per-CPU banked register sets and queues. Current version of the driver supports TX packet coalescing - once on given CPU sent packets amount reaches a threshold value, an IRQ occurs. However, there is a single interrupt line responsible for CPU0/1 TX and RX events (the latter is not per-CPU, the hardware does not support RSS). When the top-half executes the interrupt cause is not known. This is why in NAPI poll function, along with RX processing, IRQ cause register on both CPU's is accessed in order to determine on which of them the TX coalescing threshold might have been reached. Thus the egress processing and releasing the buffers is able to take place on the corresponding CPU. Hitherto approach lead to an illegal usage of on_each_cpu function in softirq context. The problem is solved by resigning from TX coalescing interrupts and separating egress finalization from NAPI processing. For that purpose a method of using hrtimer is introduced. In main transmit function (mvpp2_tx) buffers are released once a software coalescing threshold is reached. In case not all the data is processed a timer is set on this CPU - in its interrupt context a tasklet is scheduled in which all queues are processed. At once only one timer per-CPU can be running, which is controlled by a dedicated flag. This commit removes TX processing from NAPI polling function, disables hardware coalescing and enables hrtimer with tasklet, using new per-CPU port structure (mvpp2_port_pcpu). Signed-off-by: Marcin Wojtas Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 3e25d314..d9884fd 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -299,6 +301,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats { u64 tx_bytes; }; +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + struct mvpp2_port { u8 id; @@ -679,6 +690,9 @@ struct mvpp2_port { u32 pending_cause_rx; struct napi_struct napi; + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + /* Flags */ unsigned long flags; @@ -3798,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg) mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); } @@ -4374,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, rxq->time_coal = usec; } -/* Set threshold for TX_DONE pkts coalescing */ -static void mvpp2_tx_done_pkts_coal_set(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - u32 val; - - for (queue = 0; queue < txq_number; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) & - MVPP2_TRANSMITTED_THRESH_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val); - } -} - /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4425,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, u32 cause) { - int queue = fls(cause >> 16) - 1; + int queue = fls(cause) - 1; return port->txqs[queue]; } @@ -4452,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = this_cpu_ptr(txq->pcpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + /* Rx/Tx queue initialization/cleanup methods */ /* Allocate and initialize descriptors for aggr TXQ */ @@ -4812,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -4894,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev) } } +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct 
net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ + cause = (1 << txq_number) - 1; + tx_todo = mvpp2_tx_done(port, cause); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + /* Main RX/TX processing routines */ /* Display more error info */ @@ -5262,6 +5323,17 @@ out: dev_kfree_skb_any(skb); } + /* Finalize TX processing */ + if (txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + return NETDEV_TX_OK; } @@ -5275,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) netdev_err(dev, "tx fifo underrun error\n"); } -static void mvpp2_txq_done_percpu(void *arg) +static int mvpp2_poll(struct napi_struct *napi, int budget) { - struct mvpp2_port *port = arg; - u32 cause_rx_tx, cause_tx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); /* Rx/Tx cause register * @@ -5292,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg) */ cause_rx_tx = mvpp2_read(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { @@ -5304,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - /* Release TX descriptors */ - if (cause_tx) { - struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx); - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - if (txq_pcpu->count) - mvpp2_txq_done(port, txq, txq_pcpu); - } -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - - on_each_cpu(mvpp2_txq_done_percpu, port, 1); - - cause_rx_tx = mvpp2_read(port->priv, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; /* Process RX packets */ @@ -5568,6 +5621,8 @@ err_cleanup_rxqs: static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; mvpp2_stop_dev(port); mvpp2_phy_disconnect(port); @@ -5576,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev) on_each_cpu(mvpp2_interrupts_mask, port, 1); free_irq(port->irq, port); + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -5791,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, txq->done_pkts_coal = c->tx_max_coalesced_frames; } - 
on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); return 0; } @@ -6042,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, { struct device_node *phy_node; struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; struct net_device *dev; struct resource *res; const char *dt_mac_addr; @@ -6051,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, int features; int phy_mode; int priv_common_regs_num = 2; - int err, i; + int err, i, cpu; dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, rxq_number); @@ -6142,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev, } mvpp2_port_power_up(port); + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, + (unsigned long)dev); + } + netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; @@ -6151,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_txq_pcpu; + goto err_free_port_pcpu; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); @@ -6160,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, priv->port_list[id] = port; return 0; +err_free_port_pcpu: + free_percpu(port->pcpu); err_free_txq_pcpu: for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); @@ -6178,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); -- cgit v0.10.2 From ade4dc3e616e33c80d7e62855fe1b6f9895bc7c3 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 6 Aug 2015 22:48:23 +0200 Subject: bna: fix interrupts storm caused by erroneous packets The commit "e29aa33 bna: Enable Multi Buffer RX" moved packets counter increment from the beginning of the NAPI processing loop after the check for erroneous packets so they are never accounted. This counter is used to inform firmware about number of processed completions (packets). As these packets are never acked the firmware fires IRQs for them again and again. Fixes: e29aa33 ("bna: Enable Multi Buffer RX") Signed-off-by: Ivan Vecera Acked-by: Rasesh Mody Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 0612b19..506047c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (!next_cmpl->valid) break; } + packets++; /* TODO: BNA_CQ_EF_LOCAL ? 
*/ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | @@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); - packets++; rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; ccb->bytes_per_intr += totlen; -- cgit v0.10.2 From 4e7c1330689e27556de407d3fdadc65ffff5eb12 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 7 Aug 2015 00:26:41 +0200 Subject: netlink: make sure -EBUSY won't escape from netlink_insert Linus reports the following deadlock on rtnl_mutex; triggered only once so far (extract): [12236.694209] NetworkManager D 0000000000013b80 0 1047 1 0x00000000 [12236.694218] ffff88003f902640 0000000000000000 ffffffff815d15a9 0000000000000018 [12236.694224] ffff880119538000 ffff88003f902640 ffffffff81a8ff84 00000000ffffffff [12236.694230] ffffffff81a8ff88 ffff880119c47f00 ffffffff815d133a ffffffff81a8ff80 [12236.694235] Call Trace: [12236.694250] [] ? schedule_preempt_disabled+0x9/0x10 [12236.694257] [] ? schedule+0x2a/0x70 [12236.694263] [] ? schedule_preempt_disabled+0x9/0x10 [12236.694271] [] ? __mutex_lock_slowpath+0x7f/0xf0 [12236.694280] [] ? mutex_lock+0x16/0x30 [12236.694291] [] ? rtnetlink_rcv+0x10/0x30 [12236.694299] [] ? netlink_unicast+0xfb/0x180 [12236.694309] [] ? rtnl_getlink+0x113/0x190 [12236.694319] [] ? rtnetlink_rcv_msg+0x7a/0x210 [12236.694331] [] ? sock_has_perm+0x5c/0x70 [12236.694339] [] ? rtnetlink_rcv+0x30/0x30 [12236.694346] [] ? netlink_rcv_skb+0x9c/0xc0 [12236.694354] [] ? rtnetlink_rcv+0x1f/0x30 [12236.694360] [] ? netlink_unicast+0xfb/0x180 [12236.694367] [] ? netlink_sendmsg+0x484/0x5d0 [12236.694376] [] ? __wake_up+0x2f/0x50 [12236.694387] [] ? sock_sendmsg+0x33/0x40 [12236.694396] [] ? ___sys_sendmsg+0x22e/0x240 [12236.694405] [] ? ___sys_recvmsg+0x135/0x1a0 [12236.694415] [] ? eventfd_write+0x82/0x210 [12236.694423] [] ? fsnotify+0x32e/0x4c0 [12236.694429] [] ? wake_up_q+0x60/0x60 [12236.694434] [] ? __sys_sendmsg+0x39/0x70 [12236.694440] [] ? entry_SYSCALL_64_fastpath+0x12/0x6a It seems so far plausible that the recursive call into rtnetlink_rcv() looks suspicious. One way, where this could trigger is that the senders NETLINK_CB(skb).portid was wrongly 0 (which is rtnetlink socket), so the rtnl_getlink() request's answer would be sent to the kernel instead to the actual user process, thus grabbing rtnl_mutex() twice. One theory would be that netlink_autobind() triggered via netlink_sendmsg() internally overwrites the -EBUSY error to 0, but where it is wrongly originating from __netlink_insert() instead. That would reset the socket's portid to 0, which is then filled into NETLINK_CB(skb).portid later on. As commit d470e3b483dc ("[NETLINK]: Fix two socket hashing bugs.") also puts it, -EBUSY should not be propagated from netlink_insert(). It looks like it's very unlikely to reproduce. We need to trigger the rhashtable_insert_rehash() handler under a situation where rehashing currently occurs (one /rare/ way would be to hit ht->elasticity limits while not filled enough to expand the hashtable, but that would rather require a specifically crafted bind() sequence with knowledge about destination slots, seems unlikely). It probably makes sense to guard __netlink_insert() in any case and remap that error. It was suggested that EOVERFLOW might be better than an already overloaded ENOMEM. 
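For illustration of why the error must not escape netlink_insert(): the autobind caller (shown slightly abridged, as it looks in kernels of this era) deliberately treats -EBUSY as "another thread won the autobind race" and reports success:

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;

An -EBUSY that really came from a transient rhashtable rehash would therefore be converted into success even though the socket's portid had already been reset to 0, and later sends would then carry portid 0, which is exactly the suspicious recursion theorized above.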
Reference: http://thread.gmane.org/gmane.linux.network/372676 Reported-by: Linus Torvalds Signed-off-by: Daniel Borkmann Acked-by: Herbert Xu Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d8e2e39..67d2104 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid) err = __netlink_insert(table, sk); if (err) { + /* In case the hashtable backend returns with -EBUSY + * from here, it must not escape to the caller. + */ + if (unlikely(err == -EBUSY)) + err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; nlk_sk(sk)->portid = 0; -- cgit v0.10.2 From 330567b71d8716704b189454553c2696e1eceb6c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 7 Aug 2015 10:54:28 +0200 Subject: ipv6: don't reject link-local nexthop on other interface 48ed7b26faa7 ("ipv6: reject locally assigned nexthop addresses") is too strict; it rejects following corner-case: ip -6 route add default via fe80::1:2:3 dev eth1 [ where fe80::1:2:3 is assigned to a local interface, but not eth1 ] Fix this by restricting search to given device if nh is linklocal. Joint work with Hannes Frederic Sowa. Fixes: 48ed7b26faa7 ("ipv6: reject locally assigned nexthop addresses") Signed-off-by: Hannes Frederic Sowa Signed-off-by: Florian Westphal Signed-off-by: David S. Miller diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6090969..9de4d2b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1831,6 +1831,7 @@ int ip6_route_add(struct fib6_config *cfg) int gwa_type; gw_addr = &cfg->fc_gateway; + gwa_type = ipv6_addr_type(gw_addr); /* if gw_addr is local we will fail to detect this in case * address is still TENTATIVE (DAD in progress). rt6_lookup() @@ -1838,11 +1839,12 @@ int ip6_route_add(struct fib6_config *cfg) * prefix route was assigned to, which might be non-loopback. */ err = -EINVAL; - if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) + if (ipv6_chk_addr_and_flags(net, gw_addr, + gwa_type & IPV6_ADDR_LINKLOCAL ? + dev : NULL, 0, 0)) goto out; rt->rt6i_gateway = *gw_addr; - gwa_type = ipv6_addr_type(gw_addr); if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { struct rt6_info *grt; -- cgit v0.10.2 From 7a76a021cd5a292be875fbc616daf03eab1e6996 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 7 Aug 2015 09:32:21 -0700 Subject: net-timestamp: Update skb_complete_tx_timestamp comment After "62bccb8 net-timestamp: Make the clone operation stand-alone from phy timestamping" the hwtstamps parameter of skb_complete_tx_timestamp() may no longer be NULL. Signed-off-by: Benjamin Poirier Cc: Alexander Duyck Signed-off-by: David S. Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d6cdd6e..22b6d9c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2884,11 +2884,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * * PHY drivers may accept clones of transmitted packets for * timestamping via their phy_driver.txtstamp method. These drivers - * must call this function to return the skb back to the stack, with - * or without a timestamp. + * must call this function to return the skb back to the stack with a + * timestamp. 
* * @skb: clone of the the original outgoing packet - * @hwtstamps: hardware time stamps, may be NULL if not available + * @hwtstamps: hardware time stamps * */ void skb_complete_tx_timestamp(struct sk_buff *skb, -- cgit v0.10.2 From 21a447637d28eb824a1163c1fc5f41ffa4b28e33 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 8 Aug 2015 22:15:25 +0300 Subject: cxgb4: missing curly braces in t4_setup_debugfs() There were missing curly braces, which meant we called add_debugfs_mem() unintentionally. Fixes: 3ccc6cf74d8c ('cxgb4: Adds support for T6 adapter') Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index a11485f..c3c7db4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap) EXT_MEM1_SIZE_G(size)); } } else { - if (i & EXT_MEM_ENABLE_F) + if (i & EXT_MEM_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); + } } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, -- cgit v0.10.2 From e1615903eb6b5e599396d4b3d8e3e96f6d432a6e Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 10 Aug 2015 12:49:35 +0300 Subject: bnx2x: Prevent null pointer dereference on SKB release On error flows it's possible to free an SKB even if it was not allocated. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a90d736..f7fbdc9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, if (likely(skb)) { (*pkts_compl)++; (*bytes_compl) += skb->len; + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; -- cgit v0.10.2 From 0ea853dfa93371e651d8b7b27fd2344e973a86ed Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 10 Aug 2015 12:49:36 +0300 Subject: bnx2x: Free NVRAM lock at end of each page Writing each 4Kb page into flash might take up to ~100 milliseconds, during which time the management firmware cannot access the nvram for its own uses. The firmware upgrade utility uses the ethtool API to burn new flash images for the device, writing several pages' worth of data with each command. Such an action might create problems for the management firmware, as the nvram might not be accessible for a long time. This patch changes the write implementation, releasing the nvram lock on the completion of each page, allowing the management firmware time to claim it and perform its own required actions. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 76b9052..5907c82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, offset += sizeof(u32); data_buf += sizeof(u32); written_so_far += sizeof(u32); + + /* At end of each 4Kb page, release nvram lock to allow MFW + * chance to take it for its own use.
+ */ + if ((cmd_flags & MCPR_NVM_COMMAND_LAST) && + (written_so_far < buf_size)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Releasing NVM lock after offset 0x%x\n", + (u32)(offset - sizeof(u32))); + bnx2x_release_nvram_lock(bp); + usleep_range(1000, 2000); + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + } + cmd_flags = 0; } -- cgit v0.10.2 From 0be017120b80f0fe3da9a8239f989a27e54828f2 Mon Sep 17 00:00:00 2001 From: Jason Gerecke Date: Wed, 5 Aug 2015 15:44:53 -0700 Subject: HID: wacom: Report correct device resolution when using the wireless adapater The 'wacom_wireless_work' function does not recalculate the tablet's resolution, causing the value contained in the 'features' struct to always be reported to userspace. This value is valid only for the pen interface, meaning that the value will be incorrect for the touchpad (if present). This in particular causes problems for libinput which relies on the reported resolution being correct. This patch adds the necessary calls to recalculate the resolution for each interface. This requires a little bit of code shuffling since both the 'wacom_set_default_phy' and 'wacom_calculate_res' are declared below their new first point of use in 'wacom_wireless_work'. Signed-off-by: Jason Gerecke Signed-off-by: Jiri Kosina diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 44958d7..01b937e 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1284,6 +1284,39 @@ fail_register_pen_input: return error; } +/* + * Not all devices report physical dimensions from HID. + * Compute the default from hardcoded logical dimension + * and resolution before driver overwrites them. + */ +static void wacom_set_default_phy(struct wacom_features *features) +{ + if (features->x_resolution) { + features->x_phy = (features->x_max * 100) / + features->x_resolution; + features->y_phy = (features->y_max * 100) / + features->y_resolution; + } +} + +static void wacom_calculate_res(struct wacom_features *features) +{ + /* set unit to "100th of a mm" for devices not reported by HID */ + if (!features->unit) { + features->unit = 0x11; + features->unitExpo = -3; + } + + features->x_resolution = wacom_calc_hid_res(features->x_max, + features->x_phy, + features->unit, + features->unitExpo); + features->y_resolution = wacom_calc_hid_res(features->y_max, + features->y_phy, + features->unit, + features->unitExpo); +} + static void wacom_wireless_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, work); @@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.type != INTUOSHT && wacom_wac1->features.type != BAMBOO_PT) wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; + wacom_set_default_phy(&wacom_wac1->features); + wacom_calculate_res(&wacom_wac1->features); snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", @@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; + wacom_set_default_phy(&wacom_wac2->features); wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; + wacom_calculate_res(&wacom_wac2->features); snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, "%s (WL) Finger",wacom_wac2->features.name); snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, @@ -1407,39 +1444,6 @@ void wacom_battery_work(struct 
work_struct *work) } } -/* - * Not all devices report physical dimensions from HID. - * Compute the default from hardcoded logical dimension - * and resolution before driver overwrites them. - */ -static void wacom_set_default_phy(struct wacom_features *features) -{ - if (features->x_resolution) { - features->x_phy = (features->x_max * 100) / - features->x_resolution; - features->y_phy = (features->y_max * 100) / - features->y_resolution; - } -} - -static void wacom_calculate_res(struct wacom_features *features) -{ - /* set unit to "100th of a mm" for devices not reported by HID */ - if (!features->unit) { - features->unit = 0x11; - features->unitExpo = -3; - } - - features->x_resolution = wacom_calc_hid_res(features->x_max, - features->x_phy, - features->unit, - features->unitExpo); - features->y_resolution = wacom_calc_hid_res(features->y_max, - features->y_phy, - features->unit, - features->unitExpo); -} - static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; -- cgit v0.10.2 From 9d332d92a95c9c67abe08b5f7cba64d8fc1e3c76 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 10 Aug 2015 14:22:43 -0300 Subject: mkiss: Fix error handling in mkiss_open() If register_netdev() fails we are not propagating the error and we return success because ax_open() succeeded previously. Fix this by checking the return value of ax_open() and register_netdev() and propagate the error in case of failure. Reported-by: RUC_Soft_Sec Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2ffbf13..216bfd3 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty) dev->type = ARPHRD_AX25; /* Perform the low-level AX25 initialization. */ - if ((err = ax_open(ax->dev))) { + err = ax_open(ax->dev); + if (err) goto out_free_netdev; - } - if (register_netdev(dev)) + err = register_netdev(dev); + if (err) goto out_free_buffers; /* after register_netdev() - because else printk smashes the kernel */ -- cgit v0.10.2 From 2235f2ac75fd2501c251b0b699a9632e80239a6d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 10 Aug 2015 09:09:13 -0700 Subject: inet: fix races with reqsk timers reqsk_queue_destroy() and reqsk_queue_unlink() should use del_timer_sync() instead of del_timer() before calling reqsk_put(), otherwise we could free a req still used by another cpu. But before doing so, reqsk_queue_destroy() must release syn_wait_lock spinlock or risk a dead lock, as reqsk_timer_handler() might need to take this same spinlock from reqsk_queue_unlink() (called from inet_csk_reqsk_queue_drop()) Fixes: fa76ce7328b2 ("inet: get rid of central tcp/dccp listener timer") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 87b22c0..b42f0e2 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) spin_lock_bh(&queue->syn_wait_lock); while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; + /* Because of following del_timer_sync(), + * we must release the spinlock here + * or risk a dead lock. 
+ */ + spin_unlock_bh(&queue->syn_wait_lock); atomic_inc(&lopt->qlen_dec); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); reqsk_put(req); + spin_lock_bh(&queue->syn_wait_lock); } spin_unlock_bh(&queue->syn_wait_lock); } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 60021d0..05e3145 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, } spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); return found; } -- cgit v0.10.2 From 3257d8b12f954c462d29de6201664a846328a522 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 10 Aug 2015 15:07:34 -0700 Subject: inet: fix possible request socket leak In commit b357a364c57c9 ("inet: fix possible panic in reqsk_queue_unlink()"), I missed fact that tcp_check_req() can return the listener socket in one case, and that we must release the request socket refcount or we leak it. Tested: Following packetdrill test template shows the issue 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 +0 < S 0:0(0) win 2920 +0 > S. 0:0(0) ack 1 +.002 < . 1:1(0) ack 21 win 2920 +0 > R 21:21(0) Fixes: b357a364c57c9 ("inet: fix possible panic in reqsk_queue_unlink()") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d7d4c2b..0ea2e1c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 6748c42..7a6cea5 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } -- cgit v0.10.2 From ad6cd7bafcd2c812ba4200d5938e07304f1e2fcd Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Mon, 10 Aug 2015 18:11:06 +0100 Subject: Revert "xen/events/fifo: Handle linked events when closing a port" This reverts commit fcdf31a7c162de0c93a2bee51df4688ab0a348f8. This was causing a WARNING whenever a PIRQ was closed since shutdown_pirq() is called with irqs disabled. 
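For illustration only, a minimal sketch (not part of any patch here, names simplified) of the constraint behind this revert: shutdown_pirq() runs with local interrupts disabled, and the close hook being reverted, shown in the diff below, both warns on that condition and calls get_online_cpus(), which may sleep.

/* Sketch of the reverted close path: anything reached from shutdown_pirq()
 * executes with IRQs off, so it must not sleep or warn on irqs_disabled().
 */
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/irqflags.h>

static void sketch_close_hook(unsigned int port, unsigned int cpu)
{
        get_online_cpus();                      /* may sleep: unsafe with IRQs off */
        if (cpu_online(cpu)) {
                if (WARN_ON(irqs_disabled()))   /* this is the reported WARNING */
                        goto out;
                /* ... spin until the event for 'port' is no longer linked ... */
        }
out:
        put_online_cpus();
}
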
Signed-off-by: David Vrabel Cc: diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1495ecc..96093ae 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -452,12 +452,10 @@ static void xen_free_irq(unsigned irq) irq_free_desc(irq); } -static void xen_evtchn_close(unsigned int port, unsigned int cpu) +static void xen_evtchn_close(unsigned int port) { struct evtchn_close close; - xen_evtchn_op_close(port, cpu); - close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); @@ -546,7 +544,7 @@ out: err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); - xen_evtchn_close(evtchn, NR_CPUS); + xen_evtchn_close(evtchn); return 0; } @@ -567,7 +565,7 @@ static void shutdown_pirq(struct irq_data *data) return; mask_evtchn(evtchn); - xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn)); + xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } @@ -611,7 +609,7 @@ static void __unbind_from_irq(unsigned int irq) if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); - xen_evtchn_close(evtchn, cpu); + xen_evtchn_close(evtchn); switch (type_from_irq(irq)) { case IRQT_VIRQ: diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 6df8aac..ed673e1 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -255,12 +255,6 @@ static void evtchn_fifo_unmask(unsigned port) } } -static bool evtchn_fifo_is_linked(unsigned port) -{ - event_word_t *word = event_word_from_port(port); - return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word)); -} - static uint32_t clear_linked(volatile event_word_t *word) { event_word_t new, old, w; @@ -287,8 +281,7 @@ static void handle_irq_for_port(unsigned port) static void consume_one_event(unsigned cpu, struct evtchn_fifo_control_block *control_block, - unsigned priority, unsigned long *ready, - bool drop) + unsigned priority, unsigned long *ready) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -320,15 +313,13 @@ static void consume_one_event(unsigned cpu, if (head == 0) clear_bit(priority, ready); - if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { - if (likely(!drop)) - handle_irq_for_port(port); - } + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) + handle_irq_for_port(port); q->head[priority] = head; } -static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) +static void evtchn_fifo_handle_events(unsigned cpu) { struct evtchn_fifo_control_block *control_block; unsigned long ready; @@ -340,16 +331,11 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) while (ready) { q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); - consume_one_event(cpu, control_block, q, &ready, drop); + consume_one_event(cpu, control_block, q, &ready); ready |= xchg(&control_block->ready, 0); } } -static void evtchn_fifo_handle_events(unsigned cpu) -{ - __evtchn_fifo_handle_events(cpu, false); -} - static void evtchn_fifo_resume(void) { unsigned cpu; @@ -385,26 +371,6 @@ static void evtchn_fifo_resume(void) event_array_pages = 0; } -static void evtchn_fifo_close(unsigned port, unsigned int cpu) -{ - if (cpu == NR_CPUS) - return; - - get_online_cpus(); - if (cpu_online(cpu)) { - if (WARN_ON(irqs_disabled())) - goto out; - - while (evtchn_fifo_is_linked(port)) - cpu_relax(); - } else { - __evtchn_fifo_handle_events(cpu, true); - } - -out: - put_online_cpus(); -} - static const struct evtchn_ops evtchn_ops_fifo = { .max_channels = 
evtchn_fifo_max_channels, .nr_channels = evtchn_fifo_nr_channels, @@ -418,7 +384,6 @@ static const struct evtchn_ops evtchn_ops_fifo = { .unmask = evtchn_fifo_unmask, .handle_events = evtchn_fifo_handle_events, .resume = evtchn_fifo_resume, - .close = evtchn_fifo_close, }; static int evtchn_fifo_alloc_control_block(unsigned cpu) diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index d18e123..50c2050a 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -68,7 +68,6 @@ struct evtchn_ops { bool (*test_and_set_mask)(unsigned port); void (*mask)(unsigned port); void (*unmask)(unsigned port); - void (*close)(unsigned port, unsigned cpu); void (*handle_events)(unsigned cpu); void (*resume)(void); @@ -146,12 +145,6 @@ static inline void xen_evtchn_resume(void) evtchn_ops->resume(); } -static inline void xen_evtchn_op_close(unsigned port, unsigned cpu) -{ - if (evtchn_ops->close) - return evtchn_ops->close(port, cpu); -} - void xen_evtchn_2l_init(void); int xen_evtchn_fifo_init(void); -- cgit v0.10.2 From c22fe519e7e2b94ad173e0ea3b89c1a7d8be8d00 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Mon, 10 Aug 2015 19:10:38 +0100 Subject: xen/xenbus: Don't leak memory when unmapping the ring on HVM backend The commit ccc9d90a9a8b5c4ad7e9708ec41f75ff9e98d61d "xenbus_client: Extend interface to support multi-page ring" removes the call to free_xenballooned_pages() in xenbus_unmap_ring_vfree_hvm(), leaking a page for every shared ring. Only with backends running in HVM domains were affected. Signed-off-by: Julien Grall Cc: Reviewed-by: Boris Ostrovsky Reviewed-by: Wei Liu Signed-off-by: David Vrabel diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 9ad3272..e303535 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, addrs); - if (!rv) + if (!rv) { vunmap(vaddr); + free_xenballooned_pages(node->nr_handles, node->hvm.pages); + } else WARN(1, "Leaking %p, size %u page(s)\n", vaddr, node->nr_handles); -- cgit v0.10.2 From 8961822c46cc363c239503f998a6d24bbeb346d5 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Tue, 11 Aug 2015 12:11:00 +0200 Subject: net: fs_enet: explicitly remove I flag on TX partial frames We are not interested in interrupts for partially transmitted frames, we have to clear BD_ENET_TX_INTR explicitly otherwise it may remain from a previously used descriptor. Signed-off-by: Christophe Leroy Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 56316db..cf8e546 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) frag = skb_shinfo(skb)->frags; while (nr_frags) { CBDC_SC(bdp, - BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST | + BD_ENET_TX_TC); CBDS_SC(bdp, BD_ENET_TX_READY); if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) -- cgit v0.10.2 From c68875fa82a8ab2f45a32aa8adab059f3cb1ed01 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Tue, 11 Aug 2015 12:11:03 +0200 Subject: net: fs_enet: mask interrupts for TX partial frames. 
We are not interested in interrupts for partially transmitted frames. Unlike SCC and FCC, the FEC doesn't handle the I bit in buffer descriptors, instead it defines two interrupt bits, TXB and TXF. We have to mask TXB in order to only get interrupts once the frame is fully transmitted. Signed-off-by: Christophe Leroy Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b34214e..016743e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) -#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ -- cgit v0.10.2 From c0ddc8c745b7f89c50385fd7aa03c78dc543fa7a Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Mon, 27 Jul 2015 00:06:55 +0200 Subject: localmodconfig: Use Kbuild files too In kbuild it is allowed to define objects in files named "Makefile" and "Kbuild". Currently localmodconfig reads objects only from "Makefile"s and misses modules like nouveau. Link: http://lkml.kernel.org/r/1437948415-16290-1-git-send-email-richard@nod.at Cc: stable@vger.kernel.org Reported-and-tested-by: Leonidas Spyropoulos Signed-off-by: Richard Weinberger Signed-off-by: Steven Rostedt diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index 9cb8522..f3d3fb4 100755 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); my $kconfig = $ARGV[1]; my $lsmod_file = $ENV{'LSMOD'}; -my @makefiles = `find $ksource -name Makefile 2>/dev/null`; +my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`; chomp @makefiles; my %depends; -- cgit v0.10.2 From 7f518ad0a212e2a6fd68630e176af1de395070a7 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Wed, 12 Aug 2015 15:10:21 +0100 Subject: dm thin metadata: delete btrees when releasing metadata snapshot The device details and mapping trees were just being decremented before. Now btree_del() is called to do a deep delete. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 48dfe3c..6ba47cf 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) return r; disk_super = dm_block_data(copy); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); + dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); dm_sm_dec_block(pmd->metadata_sm, held_root); return dm_tm_unlock(pmd->tm, copy); -- cgit v0.10.2 From b0dc3c8bc157c60b1d470163882be8c13e1950af Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Wed, 12 Aug 2015 15:12:09 +0100 Subject: dm btree: add ref counting ops for the leaves of top level btrees When using nested btrees, the top leaves of the top levels contain block addresses for the root of the next tree down. 
If we shadow a shared leaf node the leaf values (sub tree roots) should be incremented accordingly. This is only an issue if there is metadata sharing in the top levels. Which only occurs if metadata snapshots are being used (as is possible with dm-thinp). And could result in a block from the thinp metadata snap being reused early, thus corrupting the thinp metadata snap. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index bf2b80d..8731b6e 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key); extern struct dm_block_validator btree_node_validator; +/* + * Value type for upper levels of multi-level btrees. + */ +extern void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt); + #endif /* DM_BTREE_INTERNAL_H */ diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 9ca9ecc..4222f77 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { @@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < info->levels; level++) { r = remove_raw(&spine, info, (level == last_level ? 
- &info->value_type : &le64_type), + &info->value_type : &le64_vt), root, keys[level], (unsigned *)&index); if (r < 0) break; @@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; uint64_t k; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < last_level; level++) { - r = remove_raw(&spine, info, &le64_type, + r = remove_raw(&spine, info, &le64_vt, root, keys[level], (unsigned *) &index); if (r < 0) goto out; diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index 1b5e13e..0dee514 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s) { return s->root; } + +static void le64_inc(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_inc(tm, le64_to_cpu(v_le)); +} + +static void le64_dec(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_dec(tm, le64_to_cpu(v_le)); +} + +static int le64_equal(void *context, const void *value1_le, const void *value2_le) +{ + __le64 v1_le, v2_le; + + memcpy(&v1_le, value1_le, sizeof(v1_le)); + memcpy(&v2_le, value2_le, sizeof(v2_le)); + return v1_le == v2_le; +} + +void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt) +{ + vt->context = tm; + vt->size = sizeof(__le64); + vt->inc = le64_inc; + vt->dec = le64_dec; + vt->equal = le64_equal; +} diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index fdd3793..c7726ce 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, struct btree_node *n; struct dm_btree_value_type le64_type; - le64_type.context = NULL; - le64_type.size = sizeof(__le64); - le64_type.inc = NULL; - le64_type.dec = NULL; - le64_type.equal = NULL; - + init_le64_type(info->tm, &le64_type); init_shadow_spine(&spine, info); for (level = 0; level < (info->levels - 1); level++) { -- cgit v0.10.2 From 34dd051741572859bc1fef525c5ddbc127158b52 Mon Sep 17 00:00:00 2001 From: Yi Zhang Date: Wed, 12 Aug 2015 19:22:43 +0800 Subject: dm cache policy smq: move 'dm-cache-default' module alias to SMQ When creating dm-cache with the default policy, it will call request_module("dm-cache-default") to register the default policy. But the "dm-cache-default" alias was left referring to the MQ policy. Fix this by moving the module alias to SMQ. 
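For illustration only, a minimal sketch (helper name invented, not part of the patch) of how the alias is resolved: dm-cache asks modprobe for "dm-cache-default" via request_module(), and modprobe matches that string against MODULE_ALIAS() entries, so the alias must be declared by the policy module meant to provide the default, which after this fix is SMQ.

/* Sketch: request_module() resolves "dm-cache-default" through the module
 * alias table, so the module declaring the alias is the one loaded as the
 * default cache policy.
 */
#include <linux/kmod.h>
#include <linux/module.h>

static int sketch_load_default_policy(void)
{
        return request_module("dm-cache-default");
}

/* ...and in the policy module that should back the default: */
MODULE_ALIAS("dm-cache-default");
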
Fixes: bccab6a0 (dm cache: switch the "default" cache replacement policy from mq to smq) Signed-off-by: Yi Zhang Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 3281437..aa1b41c 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -1471,5 +1471,3 @@ module_exit(mq_exit); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("mq cache policy"); - -MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index 48a4a82..200366c 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -1789,3 +1789,5 @@ module_exit(smq_exit); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("smq cache policy"); + +MODULE_ALIAS("dm-cache-default"); -- cgit v0.10.2 From 8c8bac59dda0c41c7dd289d443ac42b7b72d31b0 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 5 Aug 2015 14:03:48 -0400 Subject: drm/amdgpu: add context buffer size check for HEVC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Boyuan Zhang Reviewed-by: Christian König diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f7a5ef..f5c2255 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) unsigned height_in_mb = ALIGN(height / 16, 2); unsigned fs_in_mb = width_in_mb * height_in_mb; - unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size; image_size = width * height; image_size += image_size / 2; @@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; min_dpb_size = image_size * num_dpb_buffer; + min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16) + * 16 * num_dpb_buffer + 52 * 1024; break; default: @@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; + buf_sizes[0x4] = min_ctx_size; return 0; } @@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) return -EINVAL; } + } else if (cmd == 0x206) { + if ((end - start) < ctx->buf_sizes[4]) { + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, + (unsigned)(end - start), + ctx->buf_sizes[4]); + return -EINVAL; + } } else if ((cmd != 0x100) && (cmd != 0x204)) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; @@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) struct amdgpu_uvd_cs_ctx ctx = {}; unsigned buf_sizes[] = { [0x00000000] = 2048, - [0x00000001] = 32 * 1024 * 1024, - [0x00000002] = 2048 * 1152 * 3, + [0x00000001] = 0xFFFFFFFF, + [0x00000002] = 0xFFFFFFFF, [0x00000003] = 2048, + [0x00000004] = 0xFFFFFFFF, }; struct amdgpu_ib *ib = &parser->ibs[ib_idx]; int r; -- cgit v0.10.2 From b8826b0cbf47ecfc9fdefdbf8c7bbb308e117005 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Aug 2015 11:08:31 -0400 Subject: Revert "drm/amdgpu: Configure doorbell to maximum slots" This reverts commit 78ad5cdd21f0d614983fc397338944e797ec70b9. This commit breaks dpm and suspend/resume on CZ. 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f5a42ab..20e2cfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3135,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - 0x7FFFF << 2); + AMDGPU_DOORBELL_MEC_RING7 << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, -- cgit v0.10.2 From e037239e5e7b61007763984aa35a8329596d8c88 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Aug 2015 15:28:49 -0400 Subject: drm/radeon: add new OLAND pci id Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 45c39a3..8bc073d 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -172,6 +172,7 @@ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ -- cgit v0.10.2 From 211c504a444710b1d8ce3431ac19f2578602ca27 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 8 Aug 2015 12:58:57 -0700 Subject: net: dsa: Do not override PHY interface if already configured In case we need to divert reads/writes using the slave MII bus, we may have already fetched a valid PHY interface property from Device Tree, and that mode is used by the PHY driver to make configuration decisions. If we could not fetch the "phy-mode" property, we will assign p->phy_interface to PHY_INTERFACE_MODE_NA, such that we can actually check for that condition as to whether or not we should override the interface value. Fixes: 19334920eaf7 ("net: dsa: Set valid phy interface type") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 0917123..35c47dd 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, return -ENODEV; /* Use already configured phy mode */ - p->phy_interface = p->phy->interface; + if (p->phy_interface == PHY_INTERFACE_MODE_NA) + p->phy_interface = p->phy->interface; phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, p->phy_interface); -- cgit v0.10.2 From b02e3e948de6c11fded1821d89012e24d953da12 Mon Sep 17 00:00:00 2001 From: Venkat Venkatsubra Date: Tue, 11 Aug 2015 07:57:23 -0700 Subject: bonding: Gratuitous ARP gets dropped when first slave added When the first slave is added (such as during bootup) the first gratuitous ARP gets dropped. We don't see this drop during a failover. The packet gets dropped in qdisc (noop_enqueue). The fix is to delay the sending of gratuitous ARPs till the bond dev's carrier is present. It can also be worked around by setting num_grat_arp to more than 1. Signed-off-by: Venkat Venkatsubra Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e1ccefc..a98dd4f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond) slave ? slave->dev->name : "NULL"); if (!slave || !bond->send_peer_notif || + !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; -- cgit v0.10.2 From a898fe040f62a32b90e26dc1f1b5973608054b29 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 12 Aug 2015 02:41:55 +0200 Subject: gianfar: correct filer table writing MAX_FILER_IDX is the last usable index. Using less-than will already guarantee that one entry for catch-all rule will be left, no need to subtract 1 here. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3c0a8f8..4a710f3 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1583,11 +1583,10 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); - i++) + for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ - for (; i < MAX_FILER_IDX - 1; i++) + for (; i < MAX_FILER_IDX; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept * because that's what people expect -- cgit v0.10.2 From b5c8c8906e425f71efb83291c3837e4b78b769ea Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 12 Aug 2015 02:41:56 +0200 Subject: gianfar: correct list membership accounting At a cost of one line let's make sure .count is correct when calling gfar_process_filer_changes(). Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 4a710f3..f477b67 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1721,13 +1721,14 @@ static int gfar_add_cls(struct gfar_private *priv, } process: + priv->rx_list.count++; ret = gfar_process_filer_changes(priv); if (ret) goto clean_list; - priv->rx_list.count++; return ret; clean_list: + priv->rx_list.count--; list_del(&temp->list); clean_mem: kfree(temp); -- cgit v0.10.2 From 1f2b72933422dfdaa80b59dc3a4c37eef25c4297 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 12 Aug 2015 02:41:57 +0200 Subject: gianfar: remove faulty filer optimizer Current filer rule optimization is broken in several ways: (1) Can perform reads/writes beyond end of allocated tables. (gianfar_ethtool.c:1326). (2) It breaks badly for rules with more than 2 specifiers (e.g. matching ip, port, tos). 
Example: # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.1 dst-port 1 tos 1 action 1 Added rule with ID 254 # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.2 dst-port 2 tos 2 action 9 Added rule with ID 253 # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.3 dst-port 3 tos 3 action 17 Added rule with ID 252 # ./filer_decode /sys/kernel/debug/gfar1/filer_raw 00: MASK == 00000210 AND Q:00 ctrl:00000080 prop:00000210 01: FPR == 00000210 AND CLE Q:00 ctrl:00000281 prop:00000210 02: MASK == ffffffff AND Q:00 ctrl:00000080 prop:ffffffff 03: DPT == 00000003 AND Q:00 ctrl:0000008e prop:00000003 04: TOS == 00000003 AND Q:00 ctrl:0000008a prop:00000003 05: DIA == 0a000003 AND Q:11 ctrl:0000448c prop:0a000003 06: DPT == 00000002 AND Q:00 ctrl:0000008e prop:00000002 07: TOS == 00000002 AND Q:00 ctrl:0000008a prop:00000002 08: DIA == 0a000002 AND Q:09 ctrl:0000248c prop:0a000002 09: DIA == 0a000001 AND Q:00 ctrl:0000008c prop:0a000001 0a: DPT == 00000001 AND Q:00 ctrl:0000008e prop:00000001 0b: TOS == 00000001 CLE Q:01 ctrl:0000060a prop:00000001 ff: MASK >= 00000000 Q:00 ctrl:00000020 prop:00000000 (Entire cluster gets AND-ed together). (3) We observed that the masking rules it generates do not play well with clustering on P2020. Only first rule of the cluster would ever fire. Given that optimizer relies heavily on masking this is very hard to fix. Example: # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.1 dst-port 1 action 1 Added rule with ID 254 # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.2 dst-port 2 action 9 Added rule with ID 253 # ethtool -N eth2 flow-type udp4 dst-ip 10.0.0.3 dst-port 3 action 17 Added rule with ID 252 # ./filer_decode /sys/kernel/debug/gfar1/filer_raw 00: MASK == 00000210 AND Q:00 ctrl:00000080 prop:00000210 01: FPR == 00000210 AND CLE Q:00 ctrl:00000281 prop:00000210 02: MASK == ffffffff AND Q:00 ctrl:00000080 prop:ffffffff 03: DPT == 00000003 AND Q:00 ctrl:0000008e prop:00000003 04: DIA == 0a000003 Q:11 ctrl:0000440c prop:0a000003 05: DPT == 00000002 AND Q:00 ctrl:0000008e prop:00000002 06: DIA == 0a000002 Q:09 ctrl:0000240c prop:0a000002 07: DIA == 0a000001 AND Q:00 ctrl:0000008c prop:0a000001 08: DPT == 00000001 CLE Q:01 ctrl:0000060e prop:00000001 ff: MASK >= 00000000 Q:00 ctrl:00000020 prop:00000000 Which looks correct according to the spec but only the first (eth id 252)/last added rule for 10.0.0.3 will ever trigger. As if filer did not treat the AND CLE as cluster start but also kept AND-ing the rules. We found no errata covering this. The fact that nobody noticed (2) or (3) makes me think that this feature is not very widely used and we should just remove it. Reported-by: Aleksander Dutkowski Signed-off-by: Jakub Kicinski Acked-by: Claudiu Manoil Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index f477b67..5b90fcf 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) return 0; } -static int gfar_comp_asc(const void *a, const void *b) -{ - return memcmp(a, b, 4); -} - -static int gfar_comp_desc(const void *a, const void *b) -{ - return -memcmp(a, b, 4); -} - -static void gfar_swap(void *a, void *b, int size) -{ - u32 *_a = a; - u32 *_b = b; - - swap(_a[0], _b[0]); - swap(_a[1], _b[1]); - swap(_a[2], _b[2]); - swap(_a[3], _b[3]); -} - /* Write a mask to filer cache */ static void gfar_set_mask(u32 mask, struct filer_table *tab) { @@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, return 0; } -/* Copy size filer entries */ -static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) -{ - while (size > 0) { - size--; - dst[size].ctrl = src[size].ctrl; - dst[size].prop = src[size].prop; - } -} - -/* Delete the contents of the filer-table between start and end - * and collapse them - */ -static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) -{ - int length; - - if (end > MAX_FILER_CACHE_IDX || end < begin) - return -EINVAL; - - end++; - length = end - begin; - - /* Copy */ - while (end < tab->index) { - tab->fe[begin].ctrl = tab->fe[end].ctrl; - tab->fe[begin++].prop = tab->fe[end++].prop; - - } - /* Fill up with don't cares */ - while (begin < tab->index) { - tab->fe[begin].ctrl = 0x60; - tab->fe[begin].prop = 0xFFFFFFFF; - begin++; - } - - tab->index -= length; - return 0; -} - -/* Make space on the wanted location */ -static int gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) -{ - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || - begin > MAX_FILER_CACHE_IDX) - return -EINVAL; - - gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); - - tab->index += length; - return 0; -} - -static int gfar_get_next_cluster_start(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_AND | RQFCR_CLE)) - return start; - } - return -1; -} - -static int gfar_get_next_cluster_end(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_CLE)) - return start; - } - return -1; -} - -/* Uses hardwares clustering option to reduce - * the number of filer table entries - */ -static void gfar_cluster_filer(struct filer_table *tab) -{ - s32 i = -1, j, iend, jend; - - while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { - j = i; - while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* The cluster entries self and the previous one - * (a mask) must be identical! 
- */ - if (tab->fe[i].ctrl != tab->fe[j].ctrl) - break; - if (tab->fe[i].prop != tab->fe[j].prop) - break; - if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) - break; - if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) - break; - iend = gfar_get_next_cluster_end(i, tab); - jend = gfar_get_next_cluster_end(j, tab); - if (jend == -1 || iend == -1) - break; - - /* First we make some free space, where our cluster - * element should be. Then we copy it there and finally - * delete in from its old location. - */ - if (gfar_expand_filer_entries(iend, (jend - j), tab) == - -EINVAL) - break; - - gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); - - if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), - tab) == -EINVAL) - return; - - /* Mask out cluster bit */ - tab->fe[iend].ctrl &= ~(RQFCR_CLE); - } - } -} - -/* Swaps the masked bits of a1<>a2 and b1<>b2 */ -static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, - struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) -{ - u32 temp[4]; - temp[0] = a1->ctrl & mask; - temp[1] = a2->ctrl & mask; - temp[2] = b1->ctrl & mask; - temp[3] = b2->ctrl & mask; - - a1->ctrl &= ~mask; - a2->ctrl &= ~mask; - b1->ctrl &= ~mask; - b2->ctrl &= ~mask; - - a1->ctrl |= temp[1]; - a2->ctrl |= temp[0]; - b1->ctrl |= temp[3]; - b2->ctrl |= temp[2]; -} - -/* Generate a list consisting of masks values with their start and - * end of validity and block as indicator for parts belonging - * together (glued by ANDs) in mask_table - */ -static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) -{ - u32 i, and_index = 0, block_index = 1; - - for (i = 0; i < tab->index; i++) { - - /* LSByte of control = 0 sets a mask */ - if (!(tab->fe[i].ctrl & 0xF)) { - mask_table[and_index].mask = tab->fe[i].prop; - mask_table[and_index].start = i; - mask_table[and_index].block = block_index; - if (and_index >= 1) - mask_table[and_index - 1].end = i - 1; - and_index++; - } - /* cluster starts and ends will be separated because they should - * hold their position - */ - if (tab->fe[i].ctrl & RQFCR_CLE) - block_index++; - /* A not set AND indicates the end of a depended block */ - if (!(tab->fe[i].ctrl & RQFCR_AND)) - block_index++; - } - - mask_table[and_index - 1].end = i - 1; - - return and_index; -} - -/* Sorts the entries of mask_table by the values of the masks. - * Important: The 0xFF80 flags of the first and last entry of a - * block must hold their position (which queue, CLusterEnable, ReJEct, - * AND) - */ -static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) -{ - /* Pointer to compare function (_asc or _desc) */ - int (*gfar_comp)(const void *, const void *); - - u32 i, size = 0, start = 0, prev = 1; - u32 old_first, old_last, new_first, new_last; - - gfar_comp = &gfar_comp_desc; - - for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { - old_first = mask_table[start].start + 1; - old_last = mask_table[i - 1].end; - sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); - - /* Toggle order for every block. This makes the - * thing more efficient! 
- */ - if (gfar_comp == gfar_comp_desc) - gfar_comp = &gfar_comp_asc; - else - gfar_comp = &gfar_comp_desc; - - new_first = mask_table[start].start + 1; - new_last = mask_table[i - 1].end; - - gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND); - - start = i; - size = 0; - } - size++; - prev = mask_table[i].block; - } -} - -/* Reduces the number of masks needed in the filer table to save entries - * This is done by sorting the masks of a depended block. A depended block is - * identified by gluing ANDs or CLE. The sorting order toggles after every - * block. Of course entries in scope of a mask must change their location with - * it. - */ -static int gfar_optimize_filer_masks(struct filer_table *tab) -{ - struct filer_table *temp_table; - struct gfar_mask_entry *mask_table; - - u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; - s32 ret = 0; - - /* We need a copy of the filer table because - * we want to change its order - */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); - if (temp_table == NULL) - return -ENOMEM; - - mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); - - if (mask_table == NULL) { - ret = -ENOMEM; - goto end; - } - - and_index = gfar_generate_mask_table(mask_table, tab); - - gfar_sort_mask_table(mask_table, temp_table, and_index); - - /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says - */ - for (i = 0; i < and_index; i++) { - size = mask_table[i].end - mask_table[i].start + 1; - gfar_copy_filer_entries(&(tab->fe[j]), - &(temp_table->fe[mask_table[i].start]), size); - j += size; - } - - /* And finally we just have to check for duplicated masks and drop the - * second ones - */ - for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - previous_mask = i++; - break; - } - } - for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - if (tab->fe[i].prop == tab->fe[previous_mask].prop) { - /* Two identical ones found! - * So drop the second one! - */ - gfar_trim_filer_entries(i, i, tab); - } else - /* Not identical! */ - previous_mask = i; - } - } - - kfree(mask_table); -end: kfree(temp_table); - return ret; -} - /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, struct filer_table *tab) @@ -1620,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) { struct ethtool_flow_spec_container *j; struct filer_table *tab; - s32 i = 0; s32 ret = 0; /* So index is set to zero, too! */ @@ -1645,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) } } - i = tab->index; - - /* Optimizations to save entries */ - gfar_cluster_filer(tab); - gfar_optimize_filer_masks(tab); - - pr_debug("\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); - /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); if (ret == -EBUSY) { -- cgit v0.10.2 From e6d006938c9bda7ffd22af9d3e1257fd75941fb7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 13 Aug 2015 00:08:01 +0300 Subject: cosa: missing error code on failure in probe() If register_hdlc_device() fails, the current code returns 0 but we should return an error code instead. 
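For illustration only, a minimal sketch (invented function name, not from the patch) of the propagation pattern the fix applies: capture the return value of register_hdlc_device() and hand it back to the caller on failure, rather than returning the 0 left over from an earlier successful step.

/* Sketch of the error-propagation pattern: return the -errno from
 * register_hdlc_device() instead of the stale 0 from an earlier step.
 */
#include <linux/hdlc.h>
#include <linux/netdevice.h>

static int sketch_register(struct net_device *dev)
{
        int err;

        err = register_hdlc_device(dev);
        if (err) {
                free_netdev(dev);       /* undo the earlier allocation */
                return err;             /* propagate, do not return 0 */
        }
        return 0;
}
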
Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 7193b73..848ea6a 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma) chan->netdev->base_addr = chan->cosa->datareg; chan->netdev->irq = chan->cosa->irq; chan->netdev->dma = chan->cosa->dma; - if (register_hdlc_device(chan->netdev)) { + err = register_hdlc_device(chan->netdev); + if (err) { netdev_warn(chan->netdev, "register_hdlc_device() failed\n"); free_netdev(chan->netdev); -- cgit v0.10.2 From 5c16179b550b9fd8114637a56b153c9768ea06a5 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Tue, 21 Jul 2015 11:00:53 +0200 Subject: EDAC, ppc4xx: Access mci->csrows array elements properly The commit de3910eb79ac ("edac: change the mem allocation scheme to make Documentation/kobject.txt happy") changed the memory allocation for the csrows member. But ppc4xx_edac was forgotten in the patch. Fix it. Signed-off-by: Michael Walle Cc: Cc: linux-edac Cc: Mauro Carvalho Chehab Link: http://lkml.kernel.org/r/1437469253-8611-1-git-send-email-michael@walle.cc Signed-off-by: Borislav Petkov diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 3515b38..711d8ad 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) */ for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *csi = &mci->csrows[row]; + struct csrow_info *csi = mci->csrows[row]; /* * Get the configuration settings for this -- cgit v0.10.2 From ed596cde9425509ec6ce88e19f03e9b13b6f518b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 13 Aug 2015 08:25:20 -0700 Subject: Revert x86 sigcontext cleanups This reverts commits 9a036b93a344 ("x86/signal/64: Remove 'fs' and 'gs' from sigcontext") and c6f2062935c8 ("x86/signal/64: Fix SS handling for signals delivered to 64-bit programs"). They were cleanups, but they break dosemu by changing the signal return behavior (and removing 'fs' and 'gs' from the sigcontext struct - while not actually changing any behavior - causes build problems). Reported-and-tested-by: Stas Sergeev Acked-by: Andy Lutomirski Cc: Ingo Molnar Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 6fe6b18..9dfce4e 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -57,9 +57,9 @@ struct sigcontext { unsigned long ip; unsigned long flags; unsigned short cs; - unsigned short __pad2; /* Was called gs, but was always zero. */ - unsigned short __pad1; /* Was called fs, but was always zero. */ - unsigned short ss; + unsigned short gs; + unsigned short fs; + unsigned short __pad0; unsigned long err; unsigned long trapno; unsigned long oldmask; diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h index 0e8a973..40836a9 100644 --- a/arch/x86/include/uapi/asm/sigcontext.h +++ b/arch/x86/include/uapi/asm/sigcontext.h @@ -177,24 +177,9 @@ struct sigcontext { __u64 rip; __u64 eflags; /* RFLAGS */ __u16 cs; - - /* - * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), - * Linux saved and restored fs and gs in these slots. This - * was counterproductive, as fsbase and gsbase were never - * saved, so arch_prctl was presumably unreliable. 
- * - * If these slots are ever needed for any other purpose, there - * is some risk that very old 64-bit binaries could get - * confused. I doubt that many such binaries still work, - * though, since the same patch in 2.5.64 also removed the - * 64-bit set_thread_area syscall, so it appears that there is - * no TLS API that works in both pre- and post-2.5.64 kernels. - */ - __u16 __pad2; /* Was gs. */ - __u16 __pad1; /* Was fs. */ - - __u16 ss; + __u16 gs; + __u16 fs; + __u16 __pad0; __u64 err; __u64 trapno; __u64 oldmask; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 206996c..71820c4 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) COPY(r15); #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_32 COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); +#else /* !CONFIG_X86_32 */ + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. */ + COPY_SEG_CPL3(cs); +#endif /* CONFIG_X86_32 */ get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, #else /* !CONFIG_X86_32 */ put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->cs, &sc->cs); - put_user_ex(0, &sc->__pad2); - put_user_ex(0, &sc->__pad1); - put_user_ex(regs->ss, &sc->ss); + put_user_ex(0, &sc->gs); + put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ put_user_ex(fpstate, &sc->fpstate); @@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, regs->sp = (unsigned long)frame; - /* - * Set up the CS and SS registers to run signal handlers in - * 64-bit mode, even if the handler happens to be interrupting - * 32-bit or 16-bit code. - * - * SS is subtle. In 64-bit mode, we don't need any particular - * SS descriptor, but we do need SS to be valid. It's possible - * that the old SS is entirely bogus -- this can happen if the - * signal we're trying to deliver is #GP or #SS caused by a bad - * SS value. - */ + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; - regs->ss = __USER_DS; return 0; } -- cgit v0.10.2
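For illustration only, a userspace-style sketch (assuming an x86-64 build against installed kernel UAPI headers): the commit above notes two regressions from the reverted cleanups, changed signal-return behaviour for dosemu and build breakage for code that names the removed fields. The sketch shows the latter, since it compiles only while struct sigcontext still exposes 'fs' and 'gs', even though the kernel always stored zero there.

/* Userspace sketch: compiles only while the exported struct sigcontext
 * keeps its 'fs' and 'gs' members, which is what the revert restores.
 */
#include <asm/sigcontext.h>
#include <stdio.h>

int main(void)
{
        struct sigcontext sc = { 0 };

        sc.fs = 0;      /* gone after the reverted cleanup: build error */
        sc.gs = 0;
        printf("cs=%u fs=%u gs=%u\n",
               (unsigned)sc.cs, (unsigned)sc.fs, (unsigned)sc.gs);
        return 0;
}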