From 55a8d02bbb25edf9589b2f11afec410d053da497 Mon Sep 17 00:00:00 2001 From: Nan Jia Date: Sun, 31 May 2015 19:53:28 +1000 Subject: jfs: removed a prohibited space after opening parenthesis Fixed a coding style issue. Signed-off-by: Nan Jia Signed-off-by: Dave Kleikamp diff --git a/fs/jfs/file.c b/fs/jfs/file.c index e98d39d..b9dc23c 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -76,7 +76,7 @@ static int jfs_open(struct inode *inode, struct file *file) if (ji->active_ag == -1) { struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); - atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); + atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]); } spin_unlock_irq(&ji->ag_lock); } -- cgit v0.10.2 From 28a7eedd1109c9277390f44aa11de76b673996cd Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 1 Jun 2015 15:00:44 -0700 Subject: memory: omap-gpmc: Fix parsing of devices We currently artificially limit the parsing of GPMC connected devices based on the device name. Let's stop doing that, it's confusing as adding devices to .dts files with using normal names like fpga and usb will currently cause them to not probe. Cc: Roger Quadros Reported-by: Brian Hutchinson Signed-off-by: Tony Lindgren diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index c94ea0d..0e524a1 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2074,14 +2074,8 @@ static int gpmc_probe_dt(struct platform_device *pdev) ret = gpmc_probe_nand_child(pdev, child); else if (of_node_cmp(child->name, "onenand") == 0) ret = gpmc_probe_onenand_child(pdev, child); - else if (of_node_cmp(child->name, "ethernet") == 0 || - of_node_cmp(child->name, "nor") == 0 || - of_node_cmp(child->name, "uart") == 0) + else ret = gpmc_probe_generic_child(pdev, child); - - if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", - __func__, child->full_name)) - of_node_put(child); } return 0; -- cgit v0.10.2 From e3abe2556b2a689b28926cd1581f0b97e9d2afa4 Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Thu, 28 May 2015 11:00:05 -0400 Subject: ARM: OMAP2+: Remove unnessary return statement from the void function, omap2_show_dma_caps This removes the no longer required return statement at the end of the void function, omap2_show_dma_cap due to no need for a return statement due to this function always running successfully. Signed-off-by: Nicholas Krause Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index e1a56d8..1ed4be1 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c @@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void) u8 revision = dma_read(REVISION, 0) & 0xff; printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", revision >> 4, revision & 0xf); - return; } static unsigned configure_dma_errata(void) -- cgit v0.10.2 From 1e25aa9641e8f3fa39cd5e46b4afcafd7f12a44b Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 1 Jun 2015 16:36:27 -0700 Subject: hid-sensor: Fix suspend/resume delay By default all the sensors are runtime suspended state (lowest power state). During Linux suspend process, all the run time suspended devices are resumed and then suspended. This caused all sensors to power up and introduced delay in suspend time, when we introduced runtime PM for HID sensors. The opposite process happens during resume process. To fix this, we do powerup process of the sensors only when the request is issued from user (raw or tiggerred). 
In this way when runtime, resume calls for powerup it will simply return as this will not match user requested state. Note this is a regression fix as the increase in suspend / resume times can be substantial (report of 8 seconds on Len's laptop!) Signed-off-by: Srinivas Pandruvada Tested-by: Len Brown Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 610fc98..5955110 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { + if (!atomic_read(&st->user_requested_state)) + return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) poll_value = hid_sensor_read_poll_value(st); } else { - if (!atomic_dec_and_test(&st->data_ready)) + int val; + + val = atomic_dec_if_positive(&st->data_ready); + if (val < 0) return 0; + sensor_hub_device_close(st->hsdev); state_val = hid_sensor_get_usage_index(st->hsdev, st->power_state.report_id, @@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state); int hid_sensor_power_state(struct hid_sensor_common *st, bool state) { + #ifdef CONFIG_PM int ret; + atomic_set(&st->user_requested_state, state); if (state) ret = pm_runtime_get_sync(&st->pdev->dev); else { @@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) return 0; #else + atomic_set(&st->user_requested_state, state); return _hid_sensor_power_state(st, state); #endif } diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 0408421..cd224df 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -230,6 +230,7 @@ struct hid_sensor_common { struct platform_device *pdev; unsigned usage_id; atomic_t data_ready; + atomic_t user_requested_state; struct iio_trigger *trigger; struct hid_sensor_hub_attribute_info poll; struct hid_sensor_hub_attribute_info report_state; -- cgit v0.10.2 From f7f31adf07d1c9e66c160e9bdd77e12531cd6996 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 4 Jun 2015 17:57:03 +0100 Subject: jfs: fix indentation on if statement The if statement and closing brace are indented by 1 extra space, so remove this extra spacing. Cosmetic change only. Signed-off-by: Colin Ian King Signed-off-by: Dave Kleikamp diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 070dc4b..28d69fa 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -133,11 +133,11 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) * It has been committed since the last change, but was still * on the dirty inode list. */ - if (!test_cflag(COMMIT_Dirty, inode)) { + if (!test_cflag(COMMIT_Dirty, inode)) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait); return 0; - } + } if (jfs_commit_inode(inode, wait)) { jfs_err("jfs_write_inode: jfs_commit_inode failed!"); -- cgit v0.10.2 From dc7b8d98ac003c9f1e83a5f927c372dac6f114a1 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Mon, 25 May 2015 22:36:58 +0200 Subject: iio: adc: rockchip_saradc: add missing MODULE_* data The module-data is currently missing. This includes the license-information which makes the driver taint the kernel and miss symbols when compiled as module. 
Fixes: 44d6f2ef94f9 ("iio: adc: add driver for Rockchip saradc") Signed-off-by: Heiko Stuebner Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 8d4e019..9c311c1 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = { }; module_platform_driver(rockchip_saradc_driver); + +MODULE_AUTHOR("Heiko Stuebner "); +MODULE_DESCRIPTION("Rockchip SARADC driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 6c0d48cb29c29b306ba3548afb45154d22eb4d78 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 24 May 2015 17:39:04 -0300 Subject: iio: twl4030-madc: Pass the IRQF_ONESHOT flag Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests") threaded IRQs without a primary handler need to be requested with IRQF_ONESHOT, otherwise the request will fail. So pass the IRQF_ONESHOT flag in this case. The semantic patch that makes this change is available in scripts/coccinelle/misc/irqf_oneshot.cocci. Signed-off-by: Fabio Estevam Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index 94c5f05..4caecbe 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c @@ -835,7 +835,8 @@ static int twl4030_madc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, twl4030_madc_threaded_irq_handler, - IRQF_TRIGGER_RISING, "twl4030_madc", madc); + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "twl4030_madc", madc); if (ret) { dev_err(&pdev->dev, "could not request irq\n"); goto err_i2c; -- cgit v0.10.2 From 27b0d37e4209f3fc40c65e2b7936ee4154f4070d Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 20 May 2015 08:08:27 +0000 Subject: ARM: dts: atlas7: add pinctrl and gpio descriptions This patch adds pinctrl and gpio stuff according to the atlas7 pinctrl driver. 
Signed-off-by: Wei Chen Signed-off-by: Barry Song Acked-by: Linus Walleij diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi index a753178..1928f78 100644 --- a/arch/arm/boot/dts/atlas7.dtsi +++ b/arch/arm/boot/dts/atlas7.dtsi @@ -120,6 +120,1025 @@ compatible = "sirf,atlas7-ioc"; reg = <0x18880000 0x1000>, <0x10E40000 0x1000>; + + audio_ac97_pmx: audio_ac97@0 { + audio_ac97 { + groups = "audio_ac97_grp"; + function = "audio_ac97"; + }; + }; + + audio_func_dbg_pmx: audio_func_dbg@0 { + audio_func_dbg { + groups = "audio_func_dbg_grp"; + function = "audio_func_dbg"; + }; + }; + + audio_i2s_pmx: audio_i2s@0 { + audio_i2s { + groups = "audio_i2s_grp"; + function = "audio_i2s"; + }; + }; + + audio_i2s_2ch_pmx: audio_i2s_2ch@0 { + audio_i2s_2ch { + groups = "audio_i2s_2ch_grp"; + function = "audio_i2s_2ch"; + }; + }; + + audio_i2s_extclk_pmx: audio_i2s_extclk@0 { + audio_i2s_extclk { + groups = "audio_i2s_extclk_grp"; + function = "audio_i2s_extclk"; + }; + }; + + audio_uart0_pmx: audio_uart0@0 { + audio_uart0 { + groups = "audio_uart0_grp"; + function = "audio_uart0"; + }; + }; + + audio_uart1_pmx: audio_uart1@0 { + audio_uart1 { + groups = "audio_uart1_grp"; + function = "audio_uart1"; + }; + }; + + audio_uart2_pmx0: audio_uart2@0 { + audio_uart2_0 { + groups = "audio_uart2_grp0"; + function = "audio_uart2_m0"; + }; + }; + + audio_uart2_pmx1: audio_uart2@1 { + audio_uart2_1 { + groups = "audio_uart2_grp1"; + function = "audio_uart2_m1"; + }; + }; + + c_can_trnsvr_pmx: c_can_trnsvr@0 { + c_can_trnsvr { + groups = "c_can_trnsvr_grp"; + function = "c_can_trnsvr"; + }; + }; + + c0_can_pmx0: c0_can@0 { + c0_can_0 { + groups = "c0_can_grp0"; + function = "c0_can_m0"; + }; + }; + + c0_can_pmx1: c0_can@1 { + c0_can_1 { + groups = "c0_can_grp1"; + function = "c0_can_m1"; + }; + }; + + c1_can_pmx0: c1_can@0 { + c1_can_0 { + groups = "c1_can_grp0"; + function = "c1_can_m0"; + }; + }; + + c1_can_pmx1: c1_can@1 { + c1_can_1 { + groups = "c1_can_grp1"; + function = "c1_can_m1"; + }; + }; + + c1_can_pmx2: c1_can@2 { + c1_can_2 { + groups = "c1_can_grp2"; + function = "c1_can_m2"; + }; + }; + + ca_audio_lpc_pmx: ca_audio_lpc@0 { + ca_audio_lpc { + groups = "ca_audio_lpc_grp"; + function = "ca_audio_lpc"; + }; + }; + + ca_bt_lpc_pmx: ca_bt_lpc@0 { + ca_bt_lpc { + groups = "ca_bt_lpc_grp"; + function = "ca_bt_lpc"; + }; + }; + + ca_coex_pmx: ca_coex@0 { + ca_coex { + groups = "ca_coex_grp"; + function = "ca_coex"; + }; + }; + + ca_curator_lpc_pmx: ca_curator_lpc@0 { + ca_curator_lpc { + groups = "ca_curator_lpc_grp"; + function = "ca_curator_lpc"; + }; + }; + + ca_pcm_debug_pmx: ca_pcm_debug@0 { + ca_pcm_debug { + groups = "ca_pcm_debug_grp"; + function = "ca_pcm_debug"; + }; + }; + + ca_pio_pmx: ca_pio@0 { + ca_pio { + groups = "ca_pio_grp"; + function = "ca_pio"; + }; + }; + + ca_sdio_debug_pmx: ca_sdio_debug@0 { + ca_sdio_debug { + groups = "ca_sdio_debug_grp"; + function = "ca_sdio_debug"; + }; + }; + + ca_spi_pmx: ca_spi@0 { + ca_spi { + groups = "ca_spi_grp"; + function = "ca_spi"; + }; + }; + + ca_trb_pmx: ca_trb@0 { + ca_trb { + groups = "ca_trb_grp"; + function = "ca_trb"; + }; + }; + + ca_uart_debug_pmx: ca_uart_debug@0 { + ca_uart_debug { + groups = "ca_uart_debug_grp"; + function = "ca_uart_debug"; + }; + }; + + clkc_pmx0: clkc@0 { + clkc_0 { + groups = "clkc_grp0"; + function = "clkc_m0"; + }; + }; + + clkc_pmx1: clkc@1 { + clkc_1 { + groups = "clkc_grp1"; + function = "clkc_m1"; + }; + }; + + gn_gnss_i2c_pmx: gn_gnss_i2c@0 { + gn_gnss_i2c { + groups = "gn_gnss_i2c_grp"; + 
function = "gn_gnss_i2c"; + }; + }; + + gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 { + gn_gnss_uart_nopause { + groups = "gn_gnss_uart_nopause_grp"; + function = "gn_gnss_uart_nopause"; + }; + }; + + gn_gnss_uart_pmx: gn_gnss_uart@0 { + gn_gnss_uart { + groups = "gn_gnss_uart_grp"; + function = "gn_gnss_uart"; + }; + }; + + gn_trg_spi_pmx0: gn_trg_spi@0 { + gn_trg_spi_0 { + groups = "gn_trg_spi_grp0"; + function = "gn_trg_spi_m0"; + }; + }; + + gn_trg_spi_pmx1: gn_trg_spi@1 { + gn_trg_spi_1 { + groups = "gn_trg_spi_grp1"; + function = "gn_trg_spi_m1"; + }; + }; + + cvbs_dbg_pmx: cvbs_dbg@0 { + cvbs_dbg { + groups = "cvbs_dbg_grp"; + function = "cvbs_dbg"; + }; + }; + + cvbs_dbg_test_pmx0: cvbs_dbg_test@0 { + cvbs_dbg_test_0 { + groups = "cvbs_dbg_test_grp0"; + function = "cvbs_dbg_test_m0"; + }; + }; + + cvbs_dbg_test_pmx1: cvbs_dbg_test@1 { + cvbs_dbg_test_1 { + groups = "cvbs_dbg_test_grp1"; + function = "cvbs_dbg_test_m1"; + }; + }; + + cvbs_dbg_test_pmx2: cvbs_dbg_test@2 { + cvbs_dbg_test_2 { + groups = "cvbs_dbg_test_grp2"; + function = "cvbs_dbg_test_m2"; + }; + }; + + cvbs_dbg_test_pmx3: cvbs_dbg_test@3 { + cvbs_dbg_test_3 { + groups = "cvbs_dbg_test_grp3"; + function = "cvbs_dbg_test_m3"; + }; + }; + + cvbs_dbg_test_pmx4: cvbs_dbg_test@4 { + cvbs_dbg_test_4 { + groups = "cvbs_dbg_test_grp4"; + function = "cvbs_dbg_test_m4"; + }; + }; + + cvbs_dbg_test_pmx5: cvbs_dbg_test@5 { + cvbs_dbg_test_5 { + groups = "cvbs_dbg_test_grp5"; + function = "cvbs_dbg_test_m5"; + }; + }; + + cvbs_dbg_test_pmx6: cvbs_dbg_test@6 { + cvbs_dbg_test_6 { + groups = "cvbs_dbg_test_grp6"; + function = "cvbs_dbg_test_m6"; + }; + }; + + cvbs_dbg_test_pmx7: cvbs_dbg_test@7 { + cvbs_dbg_test_7 { + groups = "cvbs_dbg_test_grp7"; + function = "cvbs_dbg_test_m7"; + }; + }; + + cvbs_dbg_test_pmx8: cvbs_dbg_test@8 { + cvbs_dbg_test_8 { + groups = "cvbs_dbg_test_grp8"; + function = "cvbs_dbg_test_m8"; + }; + }; + + cvbs_dbg_test_pmx9: cvbs_dbg_test@9 { + cvbs_dbg_test_9 { + groups = "cvbs_dbg_test_grp9"; + function = "cvbs_dbg_test_m9"; + }; + }; + + cvbs_dbg_test_pmx10: cvbs_dbg_test@10 { + cvbs_dbg_test_10 { + groups = "cvbs_dbg_test_grp10"; + function = "cvbs_dbg_test_m10"; + }; + }; + + cvbs_dbg_test_pmx11: cvbs_dbg_test@11 { + cvbs_dbg_test_11 { + groups = "cvbs_dbg_test_grp11"; + function = "cvbs_dbg_test_m11"; + }; + }; + + cvbs_dbg_test_pmx12: cvbs_dbg_test@12 { + cvbs_dbg_test_12 { + groups = "cvbs_dbg_test_grp12"; + function = "cvbs_dbg_test_m12"; + }; + }; + + cvbs_dbg_test_pmx13: cvbs_dbg_test@13 { + cvbs_dbg_test_13 { + groups = "cvbs_dbg_test_grp13"; + function = "cvbs_dbg_test_m13"; + }; + }; + + cvbs_dbg_test_pmx14: cvbs_dbg_test@14 { + cvbs_dbg_test_14 { + groups = "cvbs_dbg_test_grp14"; + function = "cvbs_dbg_test_m14"; + }; + }; + + cvbs_dbg_test_pmx15: cvbs_dbg_test@15 { + cvbs_dbg_test_15 { + groups = "cvbs_dbg_test_grp15"; + function = "cvbs_dbg_test_m15"; + }; + }; + + gn_gnss_power_pmx: gn_gnss_power@0 { + gn_gnss_power { + groups = "gn_gnss_power_grp"; + function = "gn_gnss_power"; + }; + }; + + gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 { + gn_gnss_sw_status { + groups = "gn_gnss_sw_status_grp"; + function = "gn_gnss_sw_status"; + }; + }; + + gn_gnss_eclk_pmx: gn_gnss_eclk@0 { + gn_gnss_eclk { + groups = "gn_gnss_eclk_grp"; + function = "gn_gnss_eclk"; + }; + }; + + gn_gnss_irq1_pmx0: gn_gnss_irq1@0 { + gn_gnss_irq1_0 { + groups = "gn_gnss_irq1_grp0"; + function = "gn_gnss_irq1_m0"; + }; + }; + + gn_gnss_irq2_pmx0: gn_gnss_irq2@0 { + gn_gnss_irq2_0 { + groups = "gn_gnss_irq2_grp0"; + 
function = "gn_gnss_irq2_m0"; + }; + }; + + gn_gnss_tm_pmx: gn_gnss_tm@0 { + gn_gnss_tm { + groups = "gn_gnss_tm_grp"; + function = "gn_gnss_tm"; + }; + }; + + gn_gnss_tsync_pmx: gn_gnss_tsync@0 { + gn_gnss_tsync { + groups = "gn_gnss_tsync_grp"; + function = "gn_gnss_tsync"; + }; + }; + + gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 { + gn_io_gnsssys_sw_cfg { + groups = "gn_io_gnsssys_sw_cfg_grp"; + function = "gn_io_gnsssys_sw_cfg"; + }; + }; + + gn_trg_pmx0: gn_trg@0 { + gn_trg_0 { + groups = "gn_trg_grp0"; + function = "gn_trg_m0"; + }; + }; + + gn_trg_pmx1: gn_trg@1 { + gn_trg_1 { + groups = "gn_trg_grp1"; + function = "gn_trg_m1"; + }; + }; + + gn_trg_shutdown_pmx0: gn_trg_shutdown@0 { + gn_trg_shutdown_0 { + groups = "gn_trg_shutdown_grp0"; + function = "gn_trg_shutdown_m0"; + }; + }; + + gn_trg_shutdown_pmx1: gn_trg_shutdown@1 { + gn_trg_shutdown_1 { + groups = "gn_trg_shutdown_grp1"; + function = "gn_trg_shutdown_m1"; + }; + }; + + gn_trg_shutdown_pmx2: gn_trg_shutdown@2 { + gn_trg_shutdown_2 { + groups = "gn_trg_shutdown_grp2"; + function = "gn_trg_shutdown_m2"; + }; + }; + + gn_trg_shutdown_pmx3: gn_trg_shutdown@3 { + gn_trg_shutdown_3 { + groups = "gn_trg_shutdown_grp3"; + function = "gn_trg_shutdown_m3"; + }; + }; + + i2c0_pmx: i2c0@0 { + i2c0 { + groups = "i2c0_grp"; + function = "i2c0"; + }; + }; + + i2c1_pmx: i2c1@0 { + i2c1 { + groups = "i2c1_grp"; + function = "i2c1"; + }; + }; + + jtag_pmx0: jtag@0 { + jtag_0 { + groups = "jtag_grp0"; + function = "jtag_m0"; + }; + }; + + ks_kas_spi_pmx0: ks_kas_spi@0 { + ks_kas_spi_0 { + groups = "ks_kas_spi_grp0"; + function = "ks_kas_spi_m0"; + }; + }; + + ld_ldd_pmx: ld_ldd@0 { + ld_ldd { + groups = "ld_ldd_grp"; + function = "ld_ldd"; + }; + }; + + ld_ldd_16bit_pmx: ld_ldd_16bit@0 { + ld_ldd_16bit { + groups = "ld_ldd_16bit_grp"; + function = "ld_ldd_16bit"; + }; + }; + + ld_ldd_fck_pmx: ld_ldd_fck@0 { + ld_ldd_fck { + groups = "ld_ldd_fck_grp"; + function = "ld_ldd_fck"; + }; + }; + + ld_ldd_lck_pmx: ld_ldd_lck@0 { + ld_ldd_lck { + groups = "ld_ldd_lck_grp"; + function = "ld_ldd_lck"; + }; + }; + + lr_lcdrom_pmx: lr_lcdrom@0 { + lr_lcdrom { + groups = "lr_lcdrom_grp"; + function = "lr_lcdrom"; + }; + }; + + lvds_analog_pmx: lvds_analog@0 { + lvds_analog { + groups = "lvds_analog_grp"; + function = "lvds_analog"; + }; + }; + + nd_df_pmx: nd_df@0 { + nd_df { + groups = "nd_df_grp"; + function = "nd_df"; + }; + }; + + nd_df_nowp_pmx: nd_df_nowp@0 { + nd_df_nowp { + groups = "nd_df_nowp_grp"; + function = "nd_df_nowp"; + }; + }; + + ps_pmx: ps@0 { + ps { + groups = "ps_grp"; + function = "ps"; + }; + }; + + pwc_core_on_pmx: pwc_core_on@0 { + pwc_core_on { + groups = "pwc_core_on_grp"; + function = "pwc_core_on"; + }; + }; + + pwc_ext_on_pmx: pwc_ext_on@0 { + pwc_ext_on { + groups = "pwc_ext_on_grp"; + function = "pwc_ext_on"; + }; + }; + + pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 { + pwc_gpio3_clk { + groups = "pwc_gpio3_clk_grp"; + function = "pwc_gpio3_clk"; + }; + }; + + pwc_io_on_pmx: pwc_io_on@0 { + pwc_io_on { + groups = "pwc_io_on_grp"; + function = "pwc_io_on"; + }; + }; + + pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 { + pwc_lowbatt_b_0 { + groups = "pwc_lowbatt_b_grp0"; + function = "pwc_lowbatt_b_m0"; + }; + }; + + pwc_mem_on_pmx: pwc_mem_on@0 { + pwc_mem_on { + groups = "pwc_mem_on_grp"; + function = "pwc_mem_on"; + }; + }; + + pwc_on_key_b_pmx0: pwc_on_key_b@0 { + pwc_on_key_b_0 { + groups = "pwc_on_key_b_grp0"; + function = "pwc_on_key_b_m0"; + }; + }; + + pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 { + pwc_wakeup_src0 { + groups = 
"pwc_wakeup_src0_grp"; + function = "pwc_wakeup_src0"; + }; + }; + + pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 { + pwc_wakeup_src1 { + groups = "pwc_wakeup_src1_grp"; + function = "pwc_wakeup_src1"; + }; + }; + + pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 { + pwc_wakeup_src2 { + groups = "pwc_wakeup_src2_grp"; + function = "pwc_wakeup_src2"; + }; + }; + + pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 { + pwc_wakeup_src3 { + groups = "pwc_wakeup_src3_grp"; + function = "pwc_wakeup_src3"; + }; + }; + + pw_cko0_pmx0: pw_cko0@0 { + pw_cko0_0 { + groups = "pw_cko0_grp0"; + function = "pw_cko0_m0"; + }; + }; + + pw_cko0_pmx1: pw_cko0@1 { + pw_cko0_1 { + groups = "pw_cko0_grp1"; + function = "pw_cko0_m1"; + }; + }; + + pw_cko0_pmx2: pw_cko0@2 { + pw_cko0_2 { + groups = "pw_cko0_grp2"; + function = "pw_cko0_m2"; + }; + }; + + pw_cko1_pmx0: pw_cko1@0 { + pw_cko1_0 { + groups = "pw_cko1_grp0"; + function = "pw_cko1_m0"; + }; + }; + + pw_cko1_pmx1: pw_cko1@1 { + pw_cko1_1 { + groups = "pw_cko1_grp1"; + function = "pw_cko1_m1"; + }; + }; + + pw_i2s01_clk_pmx0: pw_i2s01_clk@0 { + pw_i2s01_clk_0 { + groups = "pw_i2s01_clk_grp0"; + function = "pw_i2s01_clk_m0"; + }; + }; + + pw_i2s01_clk_pmx1: pw_i2s01_clk@1 { + pw_i2s01_clk_1 { + groups = "pw_i2s01_clk_grp1"; + function = "pw_i2s01_clk_m1"; + }; + }; + + pw_pwm0_pmx: pw_pwm0@0 { + pw_pwm0 { + groups = "pw_pwm0_grp"; + function = "pw_pwm0"; + }; + }; + + pw_pwm1_pmx: pw_pwm1@0 { + pw_pwm1 { + groups = "pw_pwm1_grp"; + function = "pw_pwm1"; + }; + }; + + pw_pwm2_pmx0: pw_pwm2@0 { + pw_pwm2_0 { + groups = "pw_pwm2_grp0"; + function = "pw_pwm2_m0"; + }; + }; + + pw_pwm2_pmx1: pw_pwm2@1 { + pw_pwm2_1 { + groups = "pw_pwm2_grp1"; + function = "pw_pwm2_m1"; + }; + }; + + pw_pwm3_pmx0: pw_pwm3@0 { + pw_pwm3_0 { + groups = "pw_pwm3_grp0"; + function = "pw_pwm3_m0"; + }; + }; + + pw_pwm3_pmx1: pw_pwm3@1 { + pw_pwm3_1 { + groups = "pw_pwm3_grp1"; + function = "pw_pwm3_m1"; + }; + }; + + pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 { + pw_pwm_cpu_vol_0 { + groups = "pw_pwm_cpu_vol_grp0"; + function = "pw_pwm_cpu_vol_m0"; + }; + }; + + pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 { + pw_pwm_cpu_vol_1 { + groups = "pw_pwm_cpu_vol_grp1"; + function = "pw_pwm_cpu_vol_m1"; + }; + }; + + pw_backlight_pmx0: pw_backlight@0 { + pw_backlight_0 { + groups = "pw_backlight_grp0"; + function = "pw_backlight_m0"; + }; + }; + + pw_backlight_pmx1: pw_backlight@1 { + pw_backlight_1 { + groups = "pw_backlight_grp1"; + function = "pw_backlight_m1"; + }; + }; + + rg_eth_mac_pmx: rg_eth_mac@0 { + rg_eth_mac { + groups = "rg_eth_mac_grp"; + function = "rg_eth_mac"; + }; + }; + + rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 { + rg_gmac_phy_intr_n { + groups = "rg_gmac_phy_intr_n_grp"; + function = "rg_gmac_phy_intr_n"; + }; + }; + + rg_rgmii_mac_pmx: rg_rgmii_mac@0 { + rg_rgmii_mac { + groups = "rg_rgmii_mac_grp"; + function = "rg_rgmii_mac"; + }; + }; + + rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 { + rg_rgmii_phy_ref_clk_0 { + groups = + "rg_rgmii_phy_ref_clk_grp0"; + function = + "rg_rgmii_phy_ref_clk_m0"; + }; + }; + + rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 { + rg_rgmii_phy_ref_clk_1 { + groups = + "rg_rgmii_phy_ref_clk_grp1"; + function = + "rg_rgmii_phy_ref_clk_m1"; + }; + }; + + sd0_pmx: sd0@0 { + sd0 { + groups = "sd0_grp"; + function = "sd0"; + }; + }; + + sd0_4bit_pmx: sd0_4bit@0 { + sd0_4bit { + groups = "sd0_4bit_grp"; + function = "sd0_4bit"; + }; + }; + + sd1_pmx: sd1@0 { + sd1 { + groups = "sd1_grp"; + function = "sd1"; + }; + }; + + sd1_4bit_pmx0: sd1_4bit@0 { + sd1_4bit_0 { + 
groups = "sd1_4bit_grp0"; + function = "sd1_4bit_m0"; + }; + }; + + sd1_4bit_pmx1: sd1_4bit@1 { + sd1_4bit_1 { + groups = "sd1_4bit_grp1"; + function = "sd1_4bit_m1"; + }; + }; + + sd2_pmx0: sd2@0 { + sd2_0 { + groups = "sd2_grp0"; + function = "sd2_m0"; + }; + }; + + sd2_no_cdb_pmx0: sd2_no_cdb@0 { + sd2_no_cdb_0 { + groups = "sd2_no_cdb_grp0"; + function = "sd2_no_cdb_m0"; + }; + }; + + sd3_pmx: sd3@0 { + sd3 { + groups = "sd3_grp"; + function = "sd3"; + }; + }; + + sd5_pmx: sd5@0 { + sd5 { + groups = "sd5_grp"; + function = "sd5"; + }; + }; + + sd6_pmx0: sd6@0 { + sd6_0 { + groups = "sd6_grp0"; + function = "sd6_m0"; + }; + }; + + sd6_pmx1: sd6@1 { + sd6_1 { + groups = "sd6_grp1"; + function = "sd6_m1"; + }; + }; + + sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 { + sp0_ext_ldo_on { + groups = "sp0_ext_ldo_on_grp"; + function = "sp0_ext_ldo_on"; + }; + }; + + sp0_qspi_pmx: sp0_qspi@0 { + sp0_qspi { + groups = "sp0_qspi_grp"; + function = "sp0_qspi"; + }; + }; + + sp1_spi_pmx: sp1_spi@0 { + sp1_spi { + groups = "sp1_spi_grp"; + function = "sp1_spi"; + }; + }; + + tpiu_trace_pmx: tpiu_trace@0 { + tpiu_trace { + groups = "tpiu_trace_grp"; + function = "tpiu_trace"; + }; + }; + + uart0_pmx: uart0@0 { + uart0 { + groups = "uart0_grp"; + function = "uart0"; + }; + }; + + uart0_nopause_pmx: uart0_nopause@0 { + uart0_nopause { + groups = "uart0_nopause_grp"; + function = "uart0_nopause"; + }; + }; + + uart1_pmx: uart1@0 { + uart1 { + groups = "uart1_grp"; + function = "uart1"; + }; + }; + + uart2_pmx: uart2@0 { + uart2 { + groups = "uart2_grp"; + function = "uart2"; + }; + }; + + uart3_pmx0: uart3@0 { + uart3_0 { + groups = "uart3_grp0"; + function = "uart3_m0"; + }; + }; + + uart3_pmx1: uart3@1 { + uart3_1 { + groups = "uart3_grp1"; + function = "uart3_m1"; + }; + }; + + uart3_pmx2: uart3@2 { + uart3_2 { + groups = "uart3_grp2"; + function = "uart3_m2"; + }; + }; + + uart3_pmx3: uart3@3 { + uart3_3 { + groups = "uart3_grp3"; + function = "uart3_m3"; + }; + }; + + uart3_nopause_pmx0: uart3_nopause@0 { + uart3_nopause_0 { + groups = "uart3_nopause_grp0"; + function = "uart3_nopause_m0"; + }; + }; + + uart3_nopause_pmx1: uart3_nopause@1 { + uart3_nopause_1 { + groups = "uart3_nopause_grp1"; + function = "uart3_nopause_m1"; + }; + }; + + uart4_pmx0: uart4@0 { + uart4_0 { + groups = "uart4_grp0"; + function = "uart4_m0"; + }; + }; + + uart4_pmx1: uart4@1 { + uart4_1 { + groups = "uart4_grp1"; + function = "uart4_m1"; + }; + }; + + uart4_pmx2: uart4@2 { + uart4_2 { + groups = "uart4_grp2"; + function = "uart4_m2"; + }; + }; + + uart4_nopause_pmx: uart4_nopause@0 { + uart4_nopause { + groups = "uart4_nopause_grp"; + function = "uart4_nopause"; + }; + }; + + usb0_drvvbus_pmx: usb0_drvvbus@0 { + usb0_drvvbus { + groups = "usb0_drvvbus_grp"; + function = "usb0_drvvbus"; + }; + }; + + usb1_drvvbus_pmx: usb1_drvvbus@0 { + usb1_drvvbus { + groups = "usb1_drvvbus_grp"; + function = "usb1_drvvbus"; + }; + }; + + visbus_dout_pmx: visbus_dout@0 { + visbus_dout { + groups = "visbus_dout_grp"; + function = "visbus_dout"; + }; + }; + + vi_vip1_pmx: vi_vip1@0 { + vi_vip1 { + groups = "vi_vip1_grp"; + function = "vi_vip1"; + }; + }; + + vi_vip1_ext_pmx: vi_vip1_ext@0 { + vi_vip1_ext { + groups = "vi_vip1_ext_grp"; + function = "vi_vip1_ext"; + }; + }; + + vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 { + vi_vip1_low8bit { + groups = "vi_vip1_low8bit_grp"; + function = "vi_vip1_low8bit"; + }; + }; + + vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 { + vi_vip1_high8bit { + groups = "vi_vip1_high8bit_grp"; + function = "vi_vip1_high8bit"; 
+ }; + }; }; pmipc { @@ -341,6 +1360,12 @@ clock-names = "gpio0_io"; gpio-controller; interrupt-controller; + + gpio-banks = <2>; + gpio-ranges = <&pinctrl 0 0 0>, + <&pinctrl 32 0 0>; + gpio-ranges-group-names = "lvds_gpio_grp", + "uart_nand_gpio_grp"; }; nand@17050000 { @@ -446,11 +1471,22 @@ #interrupt-cells = <2>; compatible = "sirf,atlas7-gpio"; reg = <0x13300000 0x1000>; - interrupts = <0 43 0>, <0 44 0>, <0 45 0>; + interrupts = <0 43 0>, <0 44 0>, + <0 45 0>, <0 46 0>; clocks = <&car 84>; clock-names = "gpio1_io"; gpio-controller; interrupt-controller; + + gpio-banks = <4>; + gpio-ranges = <&pinctrl 0 0 0>, + <&pinctrl 32 0 0>, + <&pinctrl 64 0 0>, + <&pinctrl 96 0 0>; + gpio-ranges-group-names = "gnss_gpio_grp", + "lcd_vip_gpio_grp", + "sdio_i2s_gpio_grp", + "sp_rgmii_gpio_grp"; }; sd2: sdhci@14200000 { @@ -729,6 +1765,10 @@ interrupts = <0 47 0>; gpio-controller; interrupt-controller; + + gpio-banks = <1>; + gpio-ranges = <&pinctrl 0 0 0>; + gpio-ranges-group-names = "rtc_gpio_grp"; }; rtc-iobg@18840000 { -- cgit v0.10.2 From b1999477ed91c3c33891acfe0e18a4457e5c4915 Mon Sep 17 00:00:00 2001 From: Guo Zeng Date: Tue, 14 Apr 2015 11:55:55 +0000 Subject: ARM: prima2: move to use REGMAP APIs for rtciobrg all devices behind rtciobrg needs a special way to access. currently they are using a platform-specific API. this patch moves to REGMAP, then clients can use regmap APIs to read/write. for the moment, old APIs are still kept, once all clients move to regmap, old APIs will be dropped. this patch also does minor clean for comments, authors statement. Signed-off-by: Guo Zeng Signed-off-by: Barry Song diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig index e03d8b5..9ab8932 100644 --- a/arch/arm/mach-prima2/Kconfig +++ b/arch/arm/mach-prima2/Kconfig @@ -4,6 +4,7 @@ menuconfig ARCH_SIRF select ARCH_REQUIRE_GPIOLIB select GENERIC_IRQ_CHIP select NO_IOPORT_MAP + select REGMAP select PINCTRL select PINCTRL_SIRF help diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c index 8f66d8f..d4852d2 100644 --- a/arch/arm/mach-prima2/rtciobrg.c +++ b/arch/arm/mach-prima2/rtciobrg.c @@ -1,5 +1,5 @@ /* - * RTC I/O Bridge interfaces for CSR SiRFprimaII + * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 
@@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr) { unsigned long flags, val; + /* TODO: add hwspinlock to sync with M3 */ spin_lock_irqsave(&rtciobrg_lock, flags); val = __sirfsoc_rtc_iobrg_readl(addr); @@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr) { unsigned long flags; + /* TODO: add hwspinlock to sync with M3 */ spin_lock_irqsave(&rtciobrg_lock, flags); sirfsoc_rtc_iobrg_pre_writel(val, addr); @@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr) } EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel); + +static int regmap_iobg_regwrite(void *context, unsigned int reg, + unsigned int val) +{ + sirfsoc_rtc_iobrg_writel(val, reg); + return 0; +} + +static int regmap_iobg_regread(void *context, unsigned int reg, + unsigned int *val) +{ + *val = (u32)sirfsoc_rtc_iobrg_readl(reg); + return 0; +} + +static struct regmap_bus regmap_iobg = { + .reg_write = regmap_iobg_regwrite, + .reg_read = regmap_iobg_regread, +}; + +/** + * devm_regmap_init_iobg(): Initialise managed register map + * + * @iobg: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +struct regmap *devm_regmap_init_iobg(struct device *dev, + const struct regmap_config *config) +{ + const struct regmap_bus *bus = ®map_iobg; + + return devm_regmap_init(dev, bus, dev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_iobg); + static const struct of_device_id rtciobrg_ids[] = { { .compatible = "sirf,prima2-rtciobg" }, {} @@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void) } postcore_initcall(sirfsoc_rtciobrg_init); -MODULE_AUTHOR("Zhiwu Song , " - "Barry Song "); +MODULE_AUTHOR("Zhiwu Song "); +MODULE_AUTHOR("Barry Song "); MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge"); MODULE_LICENSE("GPL v2"); diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h index 2c92e1c..aefd997 100644 --- a/include/linux/rtc/sirfsoc_rtciobrg.h +++ b/include/linux/rtc/sirfsoc_rtciobrg.h @@ -9,10 +9,14 @@ #ifndef _SIRFSOC_RTC_IOBRG_H_ #define _SIRFSOC_RTC_IOBRG_H_ +struct regmap_config; + extern void sirfsoc_rtc_iobrg_besyncing(void); extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); +struct regmap *devm_regmap_init_iobg(struct device *dev, + const struct regmap_config *config); #endif -- cgit v0.10.2 From 6a3c45bb5a385be7049a7725a4fe93eaa76915f4 Mon Sep 17 00:00:00 2001 From: Adriana Reus Date: Fri, 12 Jun 2015 18:10:23 +0300 Subject: iio: inv-mpu: Specify the expected format/precision for write channels The gyroscope needs IIO_VAL_INT_PLUS_NANO for the scale channel and unless specified write returns MICRO by default. This needs to be properly specified so that write operations into scale have the expected behaviour. 
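As a rough illustration of the precision difference (standalone sketch; the 0.000133090 scale below is an example value, not taken from the driver's tables), a fractional value expressed in nano units cannot survive a micro-precision round trip:

/* Illustrative only: micro vs nano fractional precision for an IIO-style
 * (val, val2) pair.  Values are examples, not inv_mpu6050 register data. */
#include <stdio.h>

int main(void)
{
	long nano_val2  = 133090;           /* 0.000133090 held at 1e-9 precision */
	long micro_val2 = nano_val2 / 1000; /* same value forced to 1e-6 precision */

	printf("IIO_VAL_INT_PLUS_NANO : 0.%09ld\n", nano_val2);  /* 0.000133090 */
	printf("IIO_VAL_INT_PLUS_MICRO: 0.%06ld\n", micro_val2); /* 0.000133, precision lost */
	return 0;
}

With the default micro format the written gyro scale is quantized before it reaches the driver and can no longer match the supported scale table; reporting IIO_VAL_INT_PLUS_NANO via write_raw_get_fmt, as the patch below does, avoids that.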
Signed-off-by: Adriana Reus Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 17d4bb1..65ce868 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) return -EINVAL; } +static int inv_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ANGL_VEL: + return IIO_VAL_INT_PLUS_NANO; + default: + return IIO_VAL_INT_PLUS_MICRO; + } + default: + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +} static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) { int result, i; @@ -696,6 +713,7 @@ static const struct iio_info mpu_info = { .driver_module = THIS_MODULE, .read_raw = &inv_mpu6050_read_raw, .write_raw = &inv_mpu6050_write_raw, + .write_raw_get_fmt = &inv_write_raw_get_fmt, .attrs = &inv_attribute_group, .validate_trigger = inv_mpu6050_validate_trigger, }; -- cgit v0.10.2 From bdc10d57f236b534fb675a4bbefd10017aeb2b26 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Thu, 11 Jun 2015 18:49:33 +0300 Subject: iio: ABI: Clarify proximity output value Current description for proximity measurement is ambiguous. While the first part says that proximity is measured by observing reflectivity, the second part incorrectly infers that reported values should behave like a distance. This is because of AS3935 lightning sensor which uses the proximity API, while not being a true proximity sensor. Note this is marked for stable as it accompanies a fix in ABI usage to the sx9500 driver which would otherwise appear to be correct. Fixes: 614e8842ddf ("iio: ABI: add clarification for proximity") Signed-off-by: Daniel Baluta Cc: Signed-off-by: Jonathan Cameron diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 3befcb1..1fbdd79 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -1165,10 +1165,8 @@ Description: object is near the sensor, usually be observing reflectivity of infrared or ultrasound emitted. Often these sensors are unit less and as such conversion - to SI units is not possible. Where it is, the units should - be meters. If such a conversion is not possible, the reported - values should behave in the same way as a distance, i.e. lower - values indicate something is closer to the sensor. + to SI units is not possible. Higher proximity measurements + indicate closer objects, and vice versa. What: /sys/.../iio:deviceX/in_illuminance_input What: /sys/.../iio:deviceX/in_illuminance_raw -- cgit v0.10.2 From fd1883f07cb434707e50c4c9a16e3ed4b3a5e74f Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Thu, 11 Jun 2015 18:49:34 +0300 Subject: iio: proximity: sx9500: Fix proximity value Because of the ABI confusion proximity value exposed by SX9500 was inverted. 
Signed-off-by: Daniel Baluta Reviewed-by: Vlad Dogaru Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index fa40f6d..bd26a48 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -206,7 +206,7 @@ static int sx9500_read_proximity(struct sx9500_data *data, if (ret < 0) return ret; - *val = 32767 - (s16)be16_to_cpu(regval); + *val = be16_to_cpu(regval); return IIO_VAL_INT; } -- cgit v0.10.2 From adfa969850ae93beca57f7527f0e4dc10cbe1309 Mon Sep 17 00:00:00 2001 From: JM Friedt Date: Fri, 19 Jun 2015 14:48:06 +0200 Subject: iio: DAC: ad5624r_spi: fix bit shift of output data value The value sent on the SPI bus is shifted by an erroneous number of bits. The shift value was already computed in the iio_chan_spec structure and hence subtracting this argument to 16 yields an erroneous data position in the SPI stream. Signed-off-by: JM Friedt Acked-by: Lars-Peter Clausen Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 61bb9d4..e98428d 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -22,7 +22,7 @@ #include "ad5624r.h" static int ad5624r_spi_write(struct spi_device *spi, - u8 cmd, u8 addr, u16 val, u8 len) + u8 cmd, u8 addr, u16 val, u8 shift) { u32 data; u8 msg[3]; @@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi, * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, * for the AD5664R, AD5644R, and AD5624R, respectively. */ - data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); + data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift); msg[0] = data >> 16; msg[1] = data >> 8; msg[2] = data; -- cgit v0.10.2 From 2ab5f39bc7825808e0fa1e7e5f0b23e174563467 Mon Sep 17 00:00:00 2001 From: Jan Leupold Date: Wed, 17 Jun 2015 18:21:36 +0200 Subject: iio: adc: at91_adc: allow to use full range of startup time The DT-Property "atmel,adc-startup-time" is stored in an u8 for a microsecond value. When trying to increase the value of STARTUP in Register AT91_ADC_MR some higher values can't be reached. Change the type in function parameter and private structure field from u8 to u32. 
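To make the truncation concrete, a small standalone sketch (user-space C; the 300 µs startup time and 5 MHz ADC clock are made-up example numbers, and the tick formula simply mirrors the 9260 comment quoted in the diff below):

/* Illustrative only: what happens when a startup time above 255 us is
 * squeezed through a u8 before the ticks calculation. */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_ticks(uint32_t startup_us, uint32_t adc_clk_khz)
{
	uint32_t v = startup_us * adc_clk_khz / 1000 - 1;

	return ((v + 7) & ~7u) / 8;	/* round_up(v, 8) / 8, as in the driver comment */
}

int main(void)
{
	uint32_t dt_us = 300;			/* e.g. atmel,adc-startup-time = <300>; */
	uint8_t  old   = (uint8_t)dt_us;	/* old u8 field: wraps to 44 */
	uint32_t fixed = dt_us;			/* u32 field after the patch */

	printf("u8  path: %3u us -> STARTUP = %u\n", old,   calc_ticks(old,   5000));
	printf("u32 path: %3u us -> STARTUP = %u\n", fixed, calc_ticks(fixed, 5000));
	return 0;
}

The wrapped value programs a far shorter startup time than the device tree requested, which is exactly what widening the parameter and field to u32 fixes.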
Signed-off-by: Jan Leupold [nicolas.ferre@atmel.com: change commit message, increase u16 to u32 for startup time] Signed-off-by: Nicolas Ferre Acked-by: Alexandre Belloni Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 8a0eb4a..7b40925 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -182,7 +182,7 @@ struct at91_adc_caps { u8 ts_pen_detect_sensitivity; /* startup time calculate function */ - u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz); + u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz); u8 num_channels; struct at91_adc_reg_desc registers; @@ -201,7 +201,7 @@ struct at91_adc_state { u8 num_channels; void __iomem *reg_base; struct at91_adc_reg_desc *registers; - u8 startup_time; + u32 startup_time; u8 sample_hold_time; bool sleep_mode; struct iio_trigger **trig; @@ -779,7 +779,7 @@ ret: return ret; } -static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz) { /* * Number of ticks needed to cover the startup time of the ADC @@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8; } -static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz) { /* * For sama5d3x and at91sam9x5, the formula changes to: -- cgit v0.10.2 From c288503b32e8c5534062a05ec565d28bffa06db3 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Thu, 18 Jun 2015 00:31:59 +0200 Subject: iio:light:cm3323: clear bitmask before set When setting the bits for integration time, the appropriate bitmask needs to be cleared first. Signed-off-by: Hartmut Knaack Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c index 869033e..a1d4905 100644 --- a/drivers/iio/light/cm3323.c +++ b/drivers/iio/light/cm3323.c @@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2) for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) { if (val == cm3323_int_time[i].val && val2 == cm3323_int_time[i].val2) { - reg_conf = data->reg_conf; + reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK; reg_conf |= i << CM3323_CONF_IT_SHIFT; ret = i2c_smbus_write_word_data(data->client, -- cgit v0.10.2 From 7a1d0d91c94305fa5802a53df3a54c0ea1963c48 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Mon, 15 Jun 2015 23:48:24 +0200 Subject: iio:accel:bmc150-accel: fix counting direction In bmc150_accel_unregister_triggers() triggers should be unregistered in reverse order of registration. Trigger registration starts with number 0, counting up. In consequence, trigger number needs to be count down here. 
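The pattern in isolation (hypothetical helper names, not the driver's code): triggers registered counting up are released counting down, which is what the one-character loop fix below restores:

/* Illustrative only: teardown must walk back from the last registered
 * index; incrementing here instead would run past the array. */
#include <stdio.h>

#define NUM_TRIGGERS 3

static void register_trigger(int i)   { printf("register %d\n", i); }
static void unregister_trigger(int i) { printf("unregister %d\n", i); }

int main(void)
{
	int i, from = NUM_TRIGGERS - 1;

	for (i = 0; i < NUM_TRIGGERS; i++)	/* registration: 0, 1, 2 */
		register_trigger(i);

	for (i = from; i >= 0; i--)		/* teardown: 2, 1, 0 */
		unregister_trigger(i);

	return 0;
}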
Signed-off-by: Hartmut Knaack Reviewed-by: Octavian Purdila Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 73e8773..bf827d0 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -1465,7 +1465,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data, { int i; - for (i = from; i >= 0; i++) { + for (i = from; i >= 0; i--) { if (data->triggers[i].indio_trig) { iio_trigger_unregister(data->triggers[i].indio_trig); data->triggers[i].indio_trig = NULL; -- cgit v0.10.2 From 33361e5678a541f82f29f85467d589e7bf8da76b Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Sun, 14 Jun 2015 23:09:35 +0200 Subject: iio: light: tcs3414: Fix bug preventing to set integration time the millisecond values in tcs3414_times should be checked against val2, not val, which is always zero. Signed-off-by: Peter Meerwald Reported-by: Stephan Kleisinger Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index 71c2bde..f8b1df0 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev, if (val != 0) return -EINVAL; for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) { - if (val == tcs3414_times[i] * 1000) { + if (val2 == tcs3414_times[i] * 1000) { data->timing &= ~TCS3414_INTEG_MASK; data->timing |= i; return i2c_smbus_write_byte_data( -- cgit v0.10.2 From b2b3c3dc6a7bef886850920f5f5dca041b443aa0 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Sun, 21 Jun 2015 12:15:50 +0200 Subject: iio:adc:cc10001_adc: fix Kconfig dependency The Cosmic Circuits 10001 ADC driver depends on HAS_IOMEM, HAVE_CLK and REGULATOR together, not just any of these. Signed-off-by: Hartmut Knaack Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index e36a73e..1bcb65b 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -146,8 +146,7 @@ config DA9150_GPADC config CC10001_ADC tristate "Cosmic Circuits 10001 ADC driver" - depends on HAVE_CLK || REGULATOR - depends on HAS_IOMEM + depends on HAS_IOMEM && HAVE_CLK && REGULATOR select IIO_BUFFER select IIO_TRIGGERED_BUFFER help -- cgit v0.10.2 From a903e3b64adfc2e126237f06e0b3ea7d2d8d13b5 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 17 Mar 2015 15:31:11 +0200 Subject: drm/omap: return error if dma_alloc_writecombine fails On a platform with no TILER (e.g. omap3, am43xx), when the user wants to allocate buffer with flag OMAP_BO_SCANOUT, the buffer needs to be allocated with dma_alloc_writecombine. For some reason the driver does not return an error if that alloc fails, instead it continues without backing memory. This leads to errors later when the user tries to use the buffer. This patch makes the driver return an error if dma_alloc_writecombine fails. 
Signed-off-by: Tomi Valkeinen Acked-by: Laurent Pinchart diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 2ab7780..874eb6d 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1378,11 +1378,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); if (!omap_obj) - goto fail; - - spin_lock(&priv->list_lock); - list_add(&omap_obj->mm_list, &priv->obj_list); - spin_unlock(&priv->list_lock); + return NULL; obj = &omap_obj->base; @@ -1392,11 +1388,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, */ omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, &omap_obj->paddr, GFP_KERNEL); - if (omap_obj->vaddr) - flags |= OMAP_BO_DMA; + if (!omap_obj->vaddr) { + kfree(omap_obj); + return NULL; + } + + flags |= OMAP_BO_DMA; } + spin_lock(&priv->list_lock); + list_add(&omap_obj->mm_list, &priv->obj_list); + spin_unlock(&priv->list_lock); + omap_obj->flags = flags; if (flags & OMAP_BO_TILED) { -- cgit v0.10.2 From 7c258670ce655659a4c8d1013676c55e74d09ee7 Mon Sep 17 00:00:00 2001 From: Tedd Ho-Jeong An Date: Thu, 25 Jun 2015 23:27:55 -0700 Subject: Bluetooth: hidp: Initialize list header of hidp session user MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When new hidp session is created, list header in l2cap_user is not initialized and this causes list_empty() to fail in l2cap_register_user() even if l2cap_user list is empty. Signed-off-by: Tedd Ho-Jeong An Tested-by: Jörg Otte Signed-off-by: Marcel Holtmann diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 9070dfd..f1a117f 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -915,6 +915,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, session->conn = l2cap_conn_get(conn); session->user.probe = hidp_session_probe; session->user.remove = hidp_session_remove; + INIT_LIST_HEAD(&session->user.list); session->ctrl_sock = ctrl_sock; session->intr_sock = intr_sock; skb_queue_head_init(&session->ctrl_transmit); -- cgit v0.10.2 From 398f74569cebbf06bc6b069442bcd0e9616ca465 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Fri, 19 Jun 2015 21:37:56 +0100 Subject: ARM: 8393/1: smp: Fix suspicious RCU usage with ipi tracepoints John Stultz reports an RCU splat on boot with ARM ipi trace events enabled. =============================== [ INFO: suspicious RCU usage. ] 4.1.0-rc7-00033-gb5bed2f #153 Not tainted ------------------------------- include/trace/events/ipi.h:68 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from idle CPU! rcu_scheduler_active = 1, debug_locks = 0 RCU used illegally from extended quiescent state! no locks held by swapper/0/0. 
stack backtrace: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.1.0-rc7-00033-gb5bed2f #153 Hardware name: Qualcomm (Flattened Device Tree) [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0x70/0xbc) [] (dump_stack) from [] (handle_IPI+0x428/0x604) [] (handle_IPI) from [] (gic_handle_irq+0x54/0x5c) [] (gic_handle_irq) from [] (__irq_svc+0x44/0x7c) Exception stack(0xc09f3f48 to 0xc09f3f90) 3f40: 00000001 00000001 00000000 c09f73b8 c09f4528 c0a5de9c 3f60: c076b4f0 00000000 00000000 c09ef108 c0a5cec1 00000001 00000000 c09f3f90 3f80: c026bf60 c0210ab8 20000113 ffffffff [] (__irq_svc) from [] (arch_cpu_idle+0x20/0x3c) [] (arch_cpu_idle) from [] (cpu_startup_entry+0x2c0/0x5dc) [] (cpu_startup_entry) from [] (start_kernel+0x358/0x3c4) [] (start_kernel) from [<8020807c>] (0x8020807c) At this point in the IPI handling path we haven't called irq_enter() yet, so RCU doesn't know that we're about to exit idle and properly warns that we're using RCU from an idle CPU. Use trace_ipi_entry_rcuidle() instead of trace_ipi_entry() so that RCU is informed about our exit from idle. Fixes: 365ec7b17327 ("ARM: add IPI tracepoints") Reported-by: John Stultz Tested-by: John Stultz Acked-by: Steven Rostedt Reviewed-by: Paul E. McKenney Signed-off-by: Stephen Boyd Signed-off-by: Russell King diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index cca5b87..f11d825 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -576,7 +576,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) struct pt_regs *old_regs = set_irq_regs(regs); if ((unsigned)ipinr < NR_IPI) { - trace_ipi_entry(ipi_types[ipinr]); + trace_ipi_entry_rcuidle(ipi_types[ipinr]); __inc_irq_stat(cpu, ipi_irqs[ipinr]); } @@ -635,7 +635,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) } if ((unsigned)ipinr < NR_IPI) - trace_ipi_exit(ipi_types[ipinr]); + trace_ipi_exit_rcuidle(ipi_types[ipinr]); set_irq_regs(old_regs); } -- cgit v0.10.2 From 3de1f52a3ae823265299409e276f56cdadddc310 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 25 Jun 2015 01:04:20 +0100 Subject: ARM: 8394/1: update memblock limit after mapping lowmem The memblock limit is currently used in find_limits to find the bounds for ZONE_NORMAL. The memblock limit may need to be rounded down a PMD size to ensure allocations are fully mapped though. This has the side effect of reducing the amount of memory in ZONE_NORMAL. Once all lowmem is mapped, it's safe to change the memblock limit back to include the unaligned section. Adjust the memblock limit after lowmem mapping is complete. Before: # cat /proc/zoneinfo | grep managed managed 62907 managed 424 After: # cat /proc/zoneinfo | grep managed managed 63331 Signed-off-by: Laura Abbott Reviewed-by: Mark Rutland Tested-by: Mark Rutland Signed-off-by: Russell King diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7186382..904d153 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1519,6 +1519,7 @@ void __init paging_init(const struct machine_desc *mdesc) build_mem_type_table(); prepare_page_table(); map_lowmem(); + memblock_set_current_limit(arm_lowmem_limit); dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); -- cgit v0.10.2 From e48866647b486f31ff7c3927b48de8bbb1c6a4c0 Mon Sep 17 00:00:00 2001 From: Vitaly Andrianov Date: Fri, 26 Jun 2015 17:13:03 +0100 Subject: ARM: 8396/1: use phys_addr_t in pfn_to_kaddr() This patch fixes pfn_to_kaddr() to use phys_addr_t. Without this, this macro is broken on LPAE systems. 
For physical addresses above first 4GB result of shifting pfn with PAGE_SHIFT may be truncated. Signed-off-by: Vitaly Andrianov Acked-by: Nicolas Pitre Acked-by: Santosh Shilimkar Signed-off-by: Russell King diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 184def0..063ef31 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -291,7 +291,7 @@ static inline void *phys_to_virt(phys_addr_t x) */ #define __pa(x) __virt_to_phys((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); -- cgit v0.10.2 From e6ae32c343981a50aa07c34767f2a967cc920edc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Jun 2015 10:34:38 +0100 Subject: ARM: fix DEBUG_SET_MODULE_RONX build dependencies randconfig testing reveals that DEBUG_SET_MODULE_RONX needs to depend on MMU otherwise these build errors are observed: kernel/built-in.o: In function `set_section_ro_nx': kernel/module.c:1738: undefined reference to `set_memory_nx' kernel/built-in.o: In function `set_page_attributes': kernel/module.c:1709: undefined reference to `set_memory_ro' This is because the pageattr functions are not built for !MMU configs as they don't have page tables. Signed-off-by: Russell King diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 0c12ffb..dfa21cb 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1590,7 +1590,7 @@ config PID_IN_CONTEXTIDR config DEBUG_SET_MODULE_RONX bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES + depends on MODULES && MMU ---help--- This option helps catch unintended modifications to loadable kernel module's text and read-only data. It also prevents execution -- cgit v0.10.2 From b4d103d1a45fed0da2f31c905eb5e053c84a41c6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Jun 2015 10:49:45 +0100 Subject: ARM: add help text for HIGHPTE configuration entry Add some help text for the HIGHPTE configuration entry. This comes from the x86 entry, but reworded to be more a more accurate description of what this option does. Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 45df48b..cc0ea0c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1661,6 +1661,12 @@ config HIGHMEM config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM + help + The VM uses one page of physical memory for each page table. + For systems with a lot of processes, this can use a lot of + precious low memory, eventually leading to low memory being + consumed by page tables. Setting this option will allow + user-space 2nd level page tables to reside in high memory. config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" -- cgit v0.10.2 From eeb3fee8f6cca5e7bf1647d9e327c7b40e384578 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Jun 2015 10:52:58 +0100 Subject: ARM: add helpful message when truncating physical memory Add a nmessage to suggest that HIGHMEM is enabled when physical memory is truncated due to lack of virtual address space to map it in the low memory mapping. 
Signed-off-by: Russell King diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 904d153..79de062 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void) int highmem = 0; phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; struct memblock_region *reg; + bool should_use_highmem = false; for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; @@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void) pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", &block_start, &block_end); memblock_remove(reg->base, reg->size); + should_use_highmem = true; continue; } @@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void) &block_start, &block_end, &vmalloc_limit); memblock_remove(vmalloc_limit, overlap_size); block_end = vmalloc_limit; + should_use_highmem = true; } } @@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void) } } + if (should_use_highmem) + pr_notice("Consider using a HIGHMEM enabled kernel.\n"); + high_memory = __va(arm_lowmem_limit - 1) + 1; /* -- cgit v0.10.2 From 31f02455455d405320e2f749696bef4e02903b35 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 30 Jun 2015 12:07:17 -0400 Subject: sparse: fix misplaced __pmem definition Move the definition of __pmem outside of CONFIG_SPARSE_RCU_POINTER to fix: drivers/nvdimm/pmem.c:198:17: sparse: too many arguments for function __builtin_expect drivers/nvdimm/pmem.c:36:33: sparse: expected ; at end of declaration drivers/nvdimm/pmem.c:48:21: sparse: void declaration ...due to __pmem failing to be defined in some configurations when CONFIG_SPARSE_RCU_POINTER=y. Reported-by: kbuild test robot Reported-by: Dan Carpenter Signed-off-by: Dan Williams diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 26fc8bc..d8fbd50 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -17,11 +17,11 @@ # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) +# define __pmem __attribute__((noderef, address_space(5))) #ifdef CONFIG_SPARSE_RCU_POINTER # define __rcu __attribute__((noderef, address_space(4))) #else # define __rcu -# define __pmem __attribute__((noderef, address_space(5))) #endif extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); -- cgit v0.10.2 From af834d457d9ed69e14836b63d0da198fdd2ec706 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 30 Jun 2015 14:10:09 -0400 Subject: libnvdimm: smatch cleanups in __nd_ioctl Drop use of access_ok() since we are already using copy_{to|from}_user() which do their own access_ok(). 
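For reference, a minimal sketch of the rule this cleanup relies on (the structure and helper are invented for illustration, not part of the nvdimm ABI): copy_from_user()/copy_to_user() perform their own access_ok() validation and fail with an error, so a separate check adds nothing.

/* Illustrative only: copy_from_user() returns the number of bytes it
 * could not copy and rejects bad user pointers itself, so no explicit
 * access_ok() is needed before it. */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_args {			/* hypothetical ioctl payload */
	__u32 in_len;
	__u32 flags;
};

static long demo_copy_args(void __user *p, struct demo_args *args)
{
	if (copy_from_user(args, p, sizeof(*args)))
		return -EFAULT;

	if (args->in_len > 4096)	/* arbitrary sanity limit */
		return -EINVAL;

	return 0;
}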
Reported-by: Dan Carpenter Signed-off-by: Dan Williams diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 8eb22c0..73442bd 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, __func__, dimm_name, cmd_name, i); return -ENXIO; } - if (!access_ok(VERIFY_READ, p + in_len, in_size)) - return -EFAULT; if (in_len < sizeof(in_env)) copy = min_t(u32, sizeof(in_env) - in_len, in_size); else @@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, __func__, dimm_name, cmd_name, i); return -EFAULT; } - if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size)) - return -EFAULT; if (out_len < sizeof(out_env)) copy = min_t(u32, sizeof(out_env) - out_len, out_size); else @@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } buf_len = out_len + in_len; - if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len))) - return -EFAULT; - if (buf_len > ND_IOCTL_MAX_BUFLEN) { dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__, dimm_name, cmd_name, buf_len, -- cgit v0.10.2 From daa1dee405d7d3d3e816b84a692e838a5647a02a Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 28 Jun 2015 17:00:57 +0800 Subject: nvdimm: Fix return value of nvdimm_bus_init() if class_create() fails Return proper error if class_create() fails. Signed-off-by: Axel Lin Signed-off-by: Dan Williams diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 73442bd..7e2c43f 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -699,8 +699,10 @@ int __init nvdimm_bus_init(void) nvdimm_major = rc; nd_class = class_create(THIS_MODULE, "nd"); - if (IS_ERR(nd_class)) + if (IS_ERR(nd_class)) { + rc = PTR_ERR(nd_class); goto err_class; + } return 0; -- cgit v0.10.2 From ab944c83f6690df0c7f67e6bcc29fc0c82ef6021 Mon Sep 17 00:00:00 2001 From: Tedd Ho-Jeong An Date: Tue, 30 Jun 2015 11:43:40 -0700 Subject: Bluetooth: Reinitialize the list after deletion for session user list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the user->list is deleted with list_del(), it doesn't initialize the entry which can cause the issue with list_empty(). According to the comment from the list.h, list_empty() returns false even if the list is empty and put the entry in an undefined state. /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ Because of this behavior, list_empty() returns false even if list is empty when the device is reconnected. So, user->list needs to be re-initialized after list_del(). list.h already have a macro list_del_init() which deletes the entry and initailze it again. 
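The behaviour both Bluetooth fixes depend on, in a standalone sketch (the structure is a hypothetical stand-in for l2cap_user, not real code):

/* Illustrative only: after list_del() the entry is left in an undefined
 * state and list_empty() on it cannot be trusted; list_del_init()
 * re-runs INIT_LIST_HEAD() so a later list_empty() reliably reports
 * "not on any list". */
#include <linux/list.h>
#include <linux/types.h>

struct demo_user {			/* hypothetical stand-in for l2cap_user */
	struct list_head list;
};

static LIST_HEAD(demo_conn_users);

static void demo_register(struct demo_user *user)
{
	/* Initialize before first use so list_empty() is meaningful. */
	INIT_LIST_HEAD(&user->list);
	list_add(&user->list, &demo_conn_users);
}

static void demo_unregister(struct demo_user *user)
{
	/* Plain list_del(&user->list) would leave list_empty(&user->list) false. */
	list_del_init(&user->list);
}

static bool demo_is_registered(struct demo_user *user)
{
	return !list_empty(&user->list);
}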
Signed-off-by: Tedd Ho-Jeong An Tested-by: Jörg Otte Signed-off-by: Marcel Holtmann diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 51594fb..45fffa4 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1634,7 +1634,7 @@ void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) if (list_empty(&user->list)) goto out_unlock; - list_del(&user->list); + list_del_init(&user->list); user->remove(conn, user); out_unlock: @@ -1648,7 +1648,7 @@ static void l2cap_unregister_all_users(struct l2cap_conn *conn) while (!list_empty(&conn->users)) { user = list_first_entry(&conn->users, struct l2cap_user, list); - list_del(&user->list); + list_del_init(&user->list); user->remove(conn, user); } } -- cgit v0.10.2 From 193ccca43850d2355e7690a93ab9d7d78d38f905 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 30 Jun 2015 16:09:39 -0400 Subject: nfit: fix smatch "use after null check" report drivers/acpi/nfit.c:1224 acpi_nfit_blk_region_enable() error: we previously assumed 'nfit_mem' could be null (see line 1223) drivers/acpi/nfit.c 1222 nfit_mem = nvdimm_provider_data(nvdimm); 1223 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { ^^^^^^^^ Check. 1224 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1225 nfit_mem ? "" : " nfit_mem", 1226 nfit_mem->dcr ? "" : " dcr", ^^^^^^^^^^^^^ Unchecked dereference. Reported-by: Dan Carpenter Acked-by: Ross Zwisler Signed-off-by: Dan Williams diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 2161fa1..a20b7c8 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1223,8 +1223,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { dev_dbg(dev, "%s: missing%s%s%s\n", __func__, nfit_mem ? "" : " nfit_mem", - nfit_mem->dcr ? "" : " dcr", - nfit_mem->bdw ? "" : " bdw"); + (nfit_mem && nfit_mem->dcr) ? "" : " dcr", + (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); return -ENXIO; } -- cgit v0.10.2 From fe7599079b03d521d376da88920cc7b87f71ae25 Mon Sep 17 00:00:00 2001 From: Yang Dongsheng Date: Wed, 3 Jun 2015 14:57:32 +0800 Subject: btrfs: qgroup: allow user to clear the limitation on qgroup Currently, we can only set a limitation on a qgroup, but we can not clear it. This patch provide a choice to user to clear a limitation on qgroup by passing a value of CLEAR_VALUE(-1) to kernel. Reported-by: Tsutomu Itoh Signed-off-by: Dongsheng Yang Tested-by: Tsutomu Itoh Signed-off-by: Chris Mason diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index d5f1f03..e9ace09 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1349,6 +1349,11 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; int ret = 0; + /* Sometimes we would want to clear the limit on this qgroup. + * To meet this requirement, we treat the -1 as a special value + * which tell kernel to clear the limit on this qgroup. 
+ */ + const u64 CLEAR_VALUE = -1; mutex_lock(&fs_info->qgroup_ioctl_lock); quota_root = fs_info->quota_root; @@ -1364,14 +1369,42 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, } spin_lock(&fs_info->qgroup_lock); - if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) - qgroup->max_rfer = limit->max_rfer; - if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) - qgroup->max_excl = limit->max_excl; - if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) - qgroup->rsv_rfer = limit->rsv_rfer; - if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) - qgroup->rsv_excl = limit->rsv_excl; + if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { + if (limit->max_rfer == CLEAR_VALUE) { + qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; + limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; + qgroup->max_rfer = 0; + } else { + qgroup->max_rfer = limit->max_rfer; + } + } + if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { + if (limit->max_excl == CLEAR_VALUE) { + qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; + limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; + qgroup->max_excl = 0; + } else { + qgroup->max_excl = limit->max_excl; + } + } + if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { + if (limit->rsv_rfer == CLEAR_VALUE) { + qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; + limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; + qgroup->rsv_rfer = 0; + } else { + qgroup->rsv_rfer = limit->rsv_rfer; + } + } + if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { + if (limit->rsv_excl == CLEAR_VALUE) { + qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; + limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; + qgroup->rsv_excl = 0; + } else { + qgroup->rsv_excl = limit->rsv_excl; + } + } qgroup->lim_flags |= limit->flags; spin_unlock(&fs_info->qgroup_lock); -- cgit v0.10.2 From 65f5333875d7bbfc13436e224a181d10a80d1ada Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Mon, 8 Jun 2015 20:05:50 +0800 Subject: btrfs: cleanup noused initialization of dev in btrfs_end_bio() It is introduced by: c404e0dc2c843b154f9a36c3aec10d0a715d88eb Btrfs: fix use-after-free in the finishing procedure of the device replace But seems no relationship with that bug, this patch revirt these code block for cleanup. Signed-off-by: Zhao Lei Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8396699..d4cd405 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5715,7 +5715,6 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e static void btrfs_end_bio(struct bio *bio, int err) { struct btrfs_bio *bbio = bio->bi_private; - struct btrfs_device *dev = bbio->stripes[0].dev; int is_orig_bio = 0; if (err) { @@ -5723,6 +5722,7 @@ static void btrfs_end_bio(struct bio *bio, int err) if (err == -EIO || err == -EREMOTEIO) { unsigned int stripe_index = btrfs_io_bio(bio)->stripe_index; + struct btrfs_device *dev; BUG_ON(stripe_index >= bbio->num_stripes); dev = bbio->stripes[stripe_index].dev; -- cgit v0.10.2 From e82afc52abff07a4acbc90f899598ebafb662831 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Fri, 12 Jun 2015 20:36:58 +0800 Subject: btrfs: add error handling for scrub_workers_get() Although it is a rare case, we'd better free previous allocated memory on error. 
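The patch below converts the function to the usual kernel unwind idiom: each allocation gets a matching label, and a failure jumps to the label that frees everything allocated so far. A minimal sketch with invented names (example_setup, alloc_a/free_a and friends are placeholders, not btrfs functions):

static int example_setup(struct example_ctx *c)
{
        c->a = alloc_a();
        if (!c->a)
                goto fail_a;
        c->b = alloc_b();
        if (!c->b)
                goto fail_b;
        c->c = alloc_c();
        if (!c->c)
                goto fail_c;
        return 0;

fail_c:
        free_b(c->b);
fail_b:
        free_a(c->a);
fail_a:
        return -ENOMEM;
}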
Signed-off-by: Zhao Lei Signed-off-by: Qu Wenruo Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9f2feab..94db0fa 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3571,7 +3571,6 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { - int ret = 0; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; @@ -3584,34 +3583,36 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, fs_info->scrub_workers = btrfs_alloc_workqueue("btrfs-scrub", flags, max_active, 4); - if (!fs_info->scrub_workers) { - ret = -ENOMEM; - goto out; - } + if (!fs_info->scrub_workers) + goto fail_scrub_workers; + fs_info->scrub_wr_completion_workers = btrfs_alloc_workqueue("btrfs-scrubwrc", flags, max_active, 2); - if (!fs_info->scrub_wr_completion_workers) { - ret = -ENOMEM; - goto out; - } + if (!fs_info->scrub_wr_completion_workers) + goto fail_scrub_wr_completion_workers; + fs_info->scrub_nocow_workers = btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0); - if (!fs_info->scrub_nocow_workers) { - ret = -ENOMEM; - goto out; - } + if (!fs_info->scrub_nocow_workers) + goto fail_scrub_nocow_workers; fs_info->scrub_parity_workers = btrfs_alloc_workqueue("btrfs-scrubparity", flags, max_active, 2); - if (!fs_info->scrub_parity_workers) { - ret = -ENOMEM; - goto out; - } + if (!fs_info->scrub_parity_workers) + goto fail_scrub_parity_workers; } ++fs_info->scrub_workers_refcnt; -out: - return ret; + return 0; + +fail_scrub_parity_workers: + btrfs_destroy_workqueue(fs_info->scrub_nocow_workers); +fail_scrub_nocow_workers: + btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); +fail_scrub_wr_completion_workers: + btrfs_destroy_workqueue(fs_info->scrub_workers); +fail_scrub_workers: + return -ENOMEM; } static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) -- cgit v0.10.2 From 67c5e7d464bc466471b05e027abe8a6b29687ebd Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 11 Jun 2015 00:58:53 +0100 Subject: Btrfs: fix race between balance and unused block group deletion We have a race between deleting an unused block group and balancing the same block group that leads to an assertion failure/BUG(), producing the following trace: [181631.208236] BTRFS: assertion failed: 0, file: fs/btrfs/volumes.c, line: 2622 [181631.220591] ------------[ cut here ]------------ [181631.222959] kernel BUG at fs/btrfs/ctree.h:4062! 
[181631.223932] invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC [181631.224566] Modules linked in: btrfs dm_flakey dm_mod crc32c_generic xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop fuse acpi_cpufreq parpor$ [181631.224566] CPU: 8 PID: 17451 Comm: btrfs Tainted: G W 4.1.0-rc5-btrfs-next-10+ #1 [181631.224566] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.1-0-g4adadbd-20150316_085822-nilsson.home.kraxel.org 04/01/2014 [181631.224566] task: ffff880127e09590 ti: ffff8800b5824000 task.ti: ffff8800b5824000 [181631.224566] RIP: 0010:[] [] assfail.constprop.50+0x1e/0x20 [btrfs] [181631.224566] RSP: 0018:ffff8800b5827ae8 EFLAGS: 00010246 [181631.224566] RAX: 0000000000000040 RBX: ffff8800109fc218 RCX: ffffffff81095dce [181631.224566] RDX: 0000000000005124 RSI: ffffffff81464819 RDI: 00000000ffffffff [181631.224566] RBP: ffff8800b5827ae8 R08: 0000000000000001 R09: 0000000000000000 [181631.224566] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8800109fc200 [181631.224566] R13: ffff880020095000 R14: ffff8800b1a13f38 R15: ffff880020095000 [181631.224566] FS: 00007f70ca0b0c80(0000) GS:ffff88013ec00000(0000) knlGS:0000000000000000 [181631.224566] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [181631.224566] CR2: 00007f2872ab6e68 CR3: 00000000a717c000 CR4: 00000000000006e0 [181631.224566] Stack: [181631.224566] ffff8800b5827ba8 ffffffffa03f3916 ffff8800b5827b38 ffffffffa03d080e [181631.224566] ffffffffa03d1423 ffff880020095000 ffff88001233c000 0000000000000001 [181631.224566] ffff880020095000 ffff8800b1a13f38 0000000a69c00000 0000000000000000 [181631.224566] Call Trace: [181631.224566] [] btrfs_remove_chunk+0xa4/0x6bb [btrfs] [181631.224566] [] ? join_transaction.isra.8+0xb9/0x3ba [btrfs] [181631.224566] [] ? wait_current_trans.isra.13+0x22/0xfc [btrfs] [181631.224566] [] btrfs_relocate_chunk.isra.29+0x8f/0xa7 [btrfs] [181631.224566] [] btrfs_balance+0xaa4/0xc52 [btrfs] [181631.224566] [] btrfs_ioctl_balance+0x23f/0x2b0 [btrfs] [181631.224566] [] ? trace_hardirqs_on+0xd/0xf [181631.224566] [] btrfs_ioctl+0xfe2/0x2220 [btrfs] [181631.224566] [] ? __this_cpu_preempt_check+0x13/0x15 [181631.224566] [] ? arch_local_irq_save+0x9/0xc [181631.224566] [] ? handle_mm_fault+0x834/0xcd2 [181631.224566] [] ? handle_mm_fault+0x834/0xcd2 [181631.224566] [] ? __do_page_fault+0x211/0x424 [181631.224566] [] do_vfs_ioctl+0x3c6/0x479 (...) The sequence of steps leading to this are: CPU 0 CPU 1 btrfs_balance() btrfs_relocate_chunk() btrfs_relocate_block_group(bg X) btrfs_lookup_block_group(bg X) cleaner_kthread locks fs_info->cleaner_mutex btrfs_delete_unused_bgs() finds bg X, which became unused in the previous transaction checks bg X ->ro == 0, so it proceeds sets bg X ->ro to 1 (btrfs_set_block_group_ro(bg X)) blocks on fs_info->cleaner_mutex btrfs_remove_chunk(bg X) unlocks fs_info->cleaner_mutex acquires fs_info->cleaner_mutex relocate_block_group() --> does nothing, no extents found in the extent tree from bg X unlocks fs_info->cleaner_mutex btrfs_relocate_block_group(bg X) returns btrfs_remove_chunk(bg X) extent map not found --> ASSERT(0) Fix this by using a new mutex to make sure these 2 operations, block group relocation and removal, are serialized. 
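A minimal sketch of the protocol the new mutex establishes (fragment only, helper names invented; the reproduction details follow below): both the chunk relocation paths and btrfs_delete_unused_bgs() hold fs_info->delete_unused_bgs_mutex across the look-up-then-remove sequence, so neither side can remove a chunk the other side has already looked up.

mutex_lock(&fs_info->delete_unused_bgs_mutex);
bg = example_lookup_block_group(fs_info, offset);       /* invented helper */
if (bg)
        example_remove_or_relocate(bg);                  /* invented helper */
mutex_unlock(&fs_info->delete_unused_bgs_mutex);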
This issue is reproducible by running fstests generic/038 (which stresses chunk allocation and automatic removal of unused block groups) together with the following balance loop: while true; do btrfs balance start -dusage=0 ; done Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 80a9aef..aac314e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1778,6 +1778,7 @@ struct btrfs_fs_info { spinlock_t unused_bgs_lock; struct list_head unused_bgs; struct mutex unused_bg_unpin_mutex; + struct mutex delete_unused_bgs_mutex; /* For btrfs to record security options */ struct security_mnt_opts security_opts; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b977fc8..b59deb2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1772,7 +1772,6 @@ static int cleaner_kthread(void *arg) } btrfs_run_delayed_iputs(root); - btrfs_delete_unused_bgs(root->fs_info); again = btrfs_clean_one_deleted_snapshot(root); mutex_unlock(&root->fs_info->cleaner_mutex); @@ -1781,6 +1780,16 @@ static int cleaner_kthread(void *arg) * needn't do anything special here. */ btrfs_run_defrag_inodes(root->fs_info); + + /* + * Acquires fs_info->delete_unused_bgs_mutex to avoid racing + * with relocation (btrfs_relocate_chunk) and relocation + * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) + * after acquiring fs_info->delete_unused_bgs_mutex. So we + * can't hold, nor need to, fs_info->cleaner_mutex when deleting + * unused block groups. + */ + btrfs_delete_unused_bgs(root->fs_info); sleep: if (!try_to_freeze() && !again) { set_current_state(TASK_INTERRUPTIBLE); @@ -2492,6 +2501,7 @@ int open_ctree(struct super_block *sb, spin_lock_init(&fs_info->unused_bgs_lock); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->unused_bg_unpin_mutex); + mutex_init(&fs_info->delete_unused_bgs_mutex); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); seqlock_init(&fs_info->profiles_lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 38b76cc..1c2bd17 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9889,6 +9889,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) } spin_unlock(&fs_info->unused_bgs_lock); + mutex_lock(&root->fs_info->delete_unused_bgs_mutex); + /* Don't want to race with allocators so take the groups_sem */ down_write(&space_info->groups_sem); spin_lock(&block_group->lock); @@ -9983,6 +9985,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) end_trans: btrfs_end_transaction(trans, root); next: + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); btrfs_put_block_group(block_group); spin_lock(&fs_info->unused_bgs_lock); } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d4cd405..9b95503 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2766,6 +2766,20 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, root = root->fs_info->chunk_root; extent_root = root->fs_info->extent_root; + /* + * Prevent races with automatic removal of unused block groups. + * After we relocate and before we remove the chunk with offset + * chunk_offset, automatic removal of the block group can kick in, + * resulting in a failure when calling btrfs_remove_chunk() below. + * + * Make sure to acquire this mutex before doing a tree search (dev + * or chunk trees) to find chunks. 
Otherwise the cleaner kthread might + * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after + * we release the path used to search the chunk/dev tree and before + * the current task acquires this mutex and calls us. + */ + ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex)); + ret = btrfs_can_relocate(extent_root, chunk_offset); if (ret) return -ENOSPC; @@ -2814,13 +2828,18 @@ again: key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { + mutex_lock(&root->fs_info->delete_unused_bgs_mutex); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); goto error; + } BUG_ON(ret == 0); /* Corruption */ ret = btrfs_previous_item(chunk_root, path, key.objectid, key.type); + if (ret) + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); if (ret < 0) goto error; if (ret > 0) @@ -2843,6 +2862,7 @@ again: else BUG_ON(ret); } + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); if (found_key.offset == 0) break; @@ -3299,9 +3319,12 @@ again: goto error; } + mutex_lock(&fs_info->delete_unused_bgs_mutex); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto error; + } /* * this shouldn't happen, it means the last relocate @@ -3313,6 +3336,7 @@ again: ret = btrfs_previous_item(chunk_root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) { + mutex_unlock(&fs_info->delete_unused_bgs_mutex); ret = 0; break; } @@ -3321,8 +3345,10 @@ again: slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &found_key, slot); - if (found_key.objectid != key.objectid) + if (found_key.objectid != key.objectid) { + mutex_unlock(&fs_info->delete_unused_bgs_mutex); break; + } chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); @@ -3335,10 +3361,13 @@ again: ret = should_balance_chunk(chunk_root, leaf, chunk, found_key.offset); btrfs_release_path(path); - if (!ret) + if (!ret) { + mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto loop; + } if (counting) { + mutex_unlock(&fs_info->delete_unused_bgs_mutex); spin_lock(&fs_info->balance_lock); bctl->stat.expected++; spin_unlock(&fs_info->balance_lock); @@ -3348,6 +3377,7 @@ again: ret = btrfs_relocate_chunk(chunk_root, found_key.objectid, found_key.offset); + mutex_unlock(&fs_info->delete_unused_bgs_mutex); if (ret && ret != -ENOSPC) goto error; if (ret == -ENOSPC) { @@ -4087,11 +4117,16 @@ again: key.type = BTRFS_DEV_EXTENT_KEY; do { + mutex_lock(&root->fs_info->delete_unused_bgs_mutex); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); goto done; + } ret = btrfs_previous_item(root, path, 0, key.type); + if (ret) + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); if (ret < 0) goto done; if (ret) { @@ -4105,6 +4140,7 @@ again: btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) { + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); btrfs_release_path(path); break; } @@ -4113,6 +4149,7 @@ again: length = btrfs_dev_extent_length(l, dev_extent); if (key.offset + length <= new_size) { + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); btrfs_release_path(path); break; } @@ -4122,6 +4159,7 @@ again: btrfs_release_path(path); ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset); + mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); if (ret && ret != -ENOSPC) goto done; if (ret == -ENOSPC) -- cgit v0.10.2 From 
c3f4a1685bb87e59c886ee68f7967eae07d4dffa Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 13 Jun 2015 06:52:56 +0100 Subject: Btrfs: use kmem_cache_free when freeing entry in inode cache The free space entries are allocated using kmem_cache_zalloc(), through __btrfs_add_free_space(), therefore we should use kmem_cache_free() and not kfree() to avoid any confusion and any potential problem. Looking at the kfree() definition at mm/slab.c it has the following comment: /* * (...) * * Don't free memory not originally allocated by kmalloc() * or you will run into trouble. */ So better be safe and use kmem_cache_free(). Cc: stable@vger.kernel.org Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index f6a596d..218df70 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -271,7 +271,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) __btrfs_add_free_space(ctl, info->offset, count); free: rb_erase(&info->offset_index, rbroot); - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); } } -- cgit v0.10.2 From ae9d8f17118551bedd797406a6768b87c2146234 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 13 Jun 2015 06:52:57 +0100 Subject: Btrfs: fix race between caching kthread and returning inode to inode cache While the inode cache caching kthread is calling btrfs_unpin_free_ino(), we could have a concurrent call to btrfs_return_ino() that adds a new entry to the root's free space cache of pinned inodes. This concurrent call does not acquire the fs_info->commit_root_sem before adding a new entry if the caching state is BTRFS_CACHE_FINISHED, which is a problem because the caching kthread calls btrfs_unpin_free_ino() after setting the caching state to BTRFS_CACHE_FINISHED and therefore races with the task calling btrfs_return_ino(), which is adding a new entry, while the former (caching kthread) is navigating the cache's rbtree, removing and freeing nodes from the cache's rbtree without acquiring the spinlock that protects the rbtree. This race resulted in memory corruption due to double free of struct btrfs_free_space objects because both tasks can end up doing freeing the same objects. Note that adding a new entry can result in merging it with other entries in the cache, in which case those entries are freed. This is particularly important as btrfs_free_space structures are also used for the block group free space caches. This memory corruption can be detected by a debugging kernel, which reports it with the following trace: [132408.501148] slab error in verify_redzone_free(): cache `btrfs_free_space': double free detected [132408.505075] CPU: 15 PID: 12248 Comm: btrfs-ino-cache Tainted: G W 4.1.0-rc5-btrfs-next-10+ #1 [132408.505075] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.1-0-g4adadbd-20150316_085822-nilsson.home.kraxel.org 04/01/2014 [132408.505075] ffff880023e7d320 ffff880163d73cd8 ffffffff8145eec7 ffffffff81095dce [132408.505075] ffff880009735d40 ffff880163d73ce8 ffffffff81154e1e ffff880163d73d68 [132408.505075] ffffffff81155733 ffffffffa054a95a ffff8801b6099f00 ffffffffa0505b5f [132408.505075] Call Trace: [132408.505075] [] dump_stack+0x4f/0x7b [132408.505075] [] ? console_unlock+0x356/0x3a2 [132408.505075] [] __slab_error.isra.28+0x25/0x36 [132408.505075] [] __cache_free+0xe2/0x4b6 [132408.505075] [] ? __btrfs_add_free_space+0x2f0/0x343 [btrfs] [132408.505075] [] ? btrfs_unpin_free_ino+0x8e/0x99 [btrfs] [132408.505075] [] ? 
time_hardirqs_off+0x15/0x28 [132408.505075] [] ? trace_hardirqs_off+0xd/0xf [132408.505075] [] ? kfree+0xb6/0x14e [132408.505075] [] kfree+0xe5/0x14e [132408.505075] [] btrfs_unpin_free_ino+0x8e/0x99 [btrfs] [132408.505075] [] caching_kthread+0x29e/0x2d9 [btrfs] [132408.505075] [] ? btrfs_unpin_free_ino+0x99/0x99 [btrfs] [132408.505075] [] kthread+0xef/0xf7 [132408.505075] [] ? time_hardirqs_on+0x15/0x28 [132408.505075] [] ? __kthread_parkme+0xad/0xad [132408.505075] [] ret_from_fork+0x42/0x70 [132408.505075] [] ? __kthread_parkme+0xad/0xad [132408.505075] ffff880023e7d320: redzone 1:0x9f911029d74e35b, redzone 2:0x9f911029d74e35b. [132409.501654] slab: double free detected in cache 'btrfs_free_space', objp ffff880023e7d320 [132409.503355] ------------[ cut here ]------------ [132409.504241] kernel BUG at mm/slab.c:2571! Therefore fix this by having btrfs_unpin_free_ino() acquire the lock that protects the rbtree while doing the searches and removing entries. Fixes: 1c70d8fb4dfa ("Btrfs: fix inode caching vs tree log") Cc: stable@vger.kernel.org Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 218df70..d4a582a 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) { struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; + spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock; struct btrfs_free_space *info; struct rb_node *n; u64 count; @@ -254,23 +255,29 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) return; while (1) { + bool add_to_ctl = true; + + spin_lock(rbroot_lock); n = rb_first(rbroot); - if (!n) + if (!n) { + spin_unlock(rbroot_lock); break; + } info = rb_entry(n, struct btrfs_free_space, offset_index); BUG_ON(info->bitmap); /* Logic error */ if (info->offset > root->ino_cache_progress) - goto free; + add_to_ctl = false; else if (info->offset + info->bytes > root->ino_cache_progress) count = root->ino_cache_progress - info->offset + 1; else count = info->bytes; - __btrfs_add_free_space(ctl, info->offset, count); -free: rb_erase(&info->offset_index, rbroot); + spin_unlock(rbroot_lock); + if (add_to_ctl) + __btrfs_add_free_space(ctl, info->offset, count); kmem_cache_free(btrfs_free_space_cachep, info); } } -- cgit v0.10.2 From da288d280d16f4d7e4ada331cb33d381b408b10c Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 13 Jun 2015 06:55:31 +0100 Subject: Btrfs: fix crash on close_ctree() if cleaner starts new transaction Often when running fstests btrfs/079 I was running into the following trace during umount on one of my qemu/kvm test vms: [ 8245.682441] WARNING: CPU: 8 PID: 25064 at fs/btrfs/extent-tree.c:138 btrfs_put_block_group+0x51/0x69 [btrfs]() [ 8245.685039] Modules linked in: btrfs dm_flakey dm_mod crc32c_generic xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop fuse parport_pc i2c_piix4 acpi_cpufreq processor psmouse i2c_core thermal_sys parport evdev serio_raw button pcspkr microcode ext4 crc16 jbd2 mbcache sg sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix libata floppy virtio_pci virtio_ring scsi_mod virtio e1000 [last unloaded: btrfs] [ 8245.693860] CPU: 8 PID: 25064 Comm: umount Tainted: G W 4.1.0-rc5-btrfs-next-10+ #1 [ 8245.695081] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.1-0-g4adadbd-20150316_085822-nilsson.home.kraxel.org 04/01/2014 [ 8245.697583] 0000000000000009 
ffff88020d047ce8 ffffffff8145eec7 ffffffff81095dce [ 8245.699234] 0000000000000000 ffff88020d047d28 ffffffff8104b399 0000000000000028 [ 8245.700995] ffffffffa04db07b ffff8801c6036c00 ffff8801c6036d68 ffff880202eb40b0 [ 8245.702510] Call Trace: [ 8245.703006] [] dump_stack+0x4f/0x7b [ 8245.705393] [] ? console_unlock+0x356/0x3a2 [ 8245.706569] [] warn_slowpath_common+0xa1/0xbb [ 8245.707747] [] ? btrfs_put_block_group+0x51/0x69 [btrfs] [ 8245.709101] [] warn_slowpath_null+0x1a/0x1c [ 8245.710274] [] btrfs_put_block_group+0x51/0x69 [btrfs] [ 8245.711823] [] btrfs_free_block_groups+0x145/0x322 [btrfs] [ 8245.713251] [] close_ctree+0x1ef/0x325 [btrfs] [ 8245.714448] [] ? evict_inodes+0xdc/0xeb [ 8245.715539] [] btrfs_put_super+0x19/0x1b [btrfs] [ 8245.716835] [] generic_shutdown_super+0x73/0xef [ 8245.718015] [] kill_anon_super+0x13/0x1e [ 8245.719101] [] btrfs_kill_super+0x17/0x23 [btrfs] [ 8245.720316] [] deactivate_locked_super+0x3b/0x68 [ 8245.721517] [] deactivate_super+0x3f/0x43 [ 8245.722581] [] cleanup_mnt+0x59/0x78 [ 8245.723538] [] __cleanup_mnt+0x12/0x14 [ 8245.724572] [] task_work_run+0x8f/0xbc [ 8245.725598] [] do_notify_resume+0x45/0x53 [ 8245.726892] [] int_signal+0x12/0x17 [ 8245.737887] ---[ end trace a01d038397e99b92 ]--- [ 8245.769363] general protection fault: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC [ 8245.770737] Modules linked in: btrfs dm_flakey dm_mod crc32c_generic xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop fuse parport_pc i2c_piix4 acpi_cpufreq processor psmouse i2c_core thermal_sys parport evdev serio_raw button pcspkr microcode ext4 crc16 jbd2 mbcache sg sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix libata floppy virtio_pci virtio_ring scsi_mod virtio e1000 [last unloaded: btrfs] [ 8245.772641] CPU: 2 PID: 25064 Comm: umount Tainted: G W 4.1.0-rc5-btrfs-next-10+ #1 [ 8245.772641] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.1-0-g4adadbd-20150316_085822-nilsson.home.kraxel.org 04/01/2014 [ 8245.772641] task: ffff880013005810 ti: ffff88020d044000 task.ti: ffff88020d044000 [ 8245.772641] RIP: 0010:[] [] btrfs_queue_work+0x2c/0x14d [btrfs] [ 8245.772641] RSP: 0018:ffff88020d0478b8 EFLAGS: 00010202 [ 8245.772641] RAX: 0000000000000004 RBX: 6b6b6b6b6b6b6b6b RCX: ffffffffa0581488 [ 8245.772641] RDX: 0000000000000000 RSI: ffff880194b7bf48 RDI: ffff880144b6a7a0 [ 8245.772641] RBP: ffff88020d0478d8 R08: 0000000000000000 R09: 000000000000ffff [ 8245.772641] R10: 0000000000000004 R11: 0000000000000005 R12: ffff880194b7bf48 [ 8245.772641] R13: ffff880194b7bf48 R14: 0000000000000410 R15: 0000000000000000 [ 8245.772641] FS: 00007f991e77d840(0000) GS:ffff88023e280000(0000) knlGS:0000000000000000 [ 8245.772641] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [ 8245.772641] CR2: 00007fbbd325ee68 CR3: 000000021de8e000 CR4: 00000000000006e0 [ 8245.772641] Stack: [ 8245.772641] ffff880194b7bf00 ffff880202eb4000 ffff880194b7bf48 0000000000000410 [ 8245.772641] ffff88020d047958 ffffffffa04ec6d5 ffff8801629b2ee8 0000000082987570 [ 8245.772641] 0000000000a5813f 0000000000000001 ffff880013006100 0000000000000002 [ 8245.772641] Call Trace: [ 8245.772641] [] btrfs_wq_submit_bio+0xe1/0x17b [btrfs] [ 8245.772641] [] ? check_irq_usage+0x76/0x87 [ 8245.772641] [] btree_submit_bio_hook+0xb6/0xd9 [btrfs] [ 8245.772641] [] ? btree_csum_one_bio+0xad/0xad [btrfs] [ 8245.772641] [] ? 
btree_io_failed_hook+0x5e/0x5e [btrfs] [ 8245.772641] [] submit_one_bio+0x8c/0xc7 [btrfs] [ 8245.772641] [] submit_extent_page.isra.18+0x9d/0x186 [btrfs] [ 8245.772641] [] write_one_eb+0x117/0x1ae [btrfs] [ 8245.772641] [] ? end_extent_buffer_writeback+0x21/0x21 [btrfs] [ 8245.772641] [] btree_write_cache_pages+0x2ab/0x385 [btrfs] [ 8245.772641] [] btree_writepages+0x23/0x5c [btrfs] [ 8245.772641] [] do_writepages+0x23/0x2c [ 8245.772641] [] __writeback_single_inode+0xda/0x5bd [ 8245.772641] [] ? writeback_single_inode+0x2b/0x173 [ 8245.772641] [] writeback_single_inode+0xc8/0x173 [ 8245.772641] [] write_inode_now+0x8a/0x95 [ 8245.772641] [] ? _atomic_dec_and_lock+0x30/0x4e [ 8245.772641] [] iput+0x17d/0x26a [ 8245.772641] [] close_ctree+0x22a/0x325 [btrfs] [ 8245.772641] [] ? evict_inodes+0xdc/0xeb [ 8245.772641] [] btrfs_put_super+0x19/0x1b [btrfs] [ 8245.772641] [] generic_shutdown_super+0x73/0xef [ 8245.772641] [] kill_anon_super+0x13/0x1e [ 8245.772641] [] btrfs_kill_super+0x17/0x23 [btrfs] [ 8245.772641] [] deactivate_locked_super+0x3b/0x68 [ 8245.772641] [] deactivate_super+0x3f/0x43 [ 8245.772641] [] cleanup_mnt+0x59/0x78 [ 8245.772641] [] __cleanup_mnt+0x12/0x14 [ 8245.772641] [] task_work_run+0x8f/0xbc [ 8245.772641] [] do_notify_resume+0x45/0x53 [ 8245.772641] [] int_signal+0x12/0x17 [ 8245.772641] Code: 1f 44 00 00 55 48 89 e5 41 56 41 55 41 54 53 49 89 f4 48 8b 46 70 a8 04 74 09 48 8b 5f 08 48 85 db 75 03 48 8b 1f 49 89 5c 24 68 <83> 7b 5c ff 74 04 f0 ff 43 50 49 83 7c 24 08 00 74 2c 4c 8d 6b [ 8245.772641] RIP [] btrfs_queue_work+0x2c/0x14d [btrfs] [ 8245.772641] RSP [ 8245.845040] ---[ end trace a01d038397e99b93 ]--- For logical reasons such as the phase of the moon, this happened more often with "-o inode_cache" than without any mount options. After some debugging it turned out to be simple to understand what was happening: 1) close_ctree() is called; 2) It then stops the transaction kthread, which commits the current transaction; 3) It asks the cleaner kthread to stop, which is currently running btrfs_delete_unused_bgs(); 4) btrfs_delete_unused_bgs() finds an unused block group, starts a new transaction, deletes the block group, which implies COWing some tree nodes and leafs and dirtying their respective pages, and then finally it ends the transaction it started, without committing it; 5) The cleaner kthread stops; 6) close_ctree() releases (from memory) the block group objects, which produces the warning in the trace pasted above; 7) Then it invalidates all pages of the btree inode, by calling invalidate_inode_pages2(), which waits for any pages under writeback, and releases any non-dirty pages; 8) All work queues are destroyed (waiting first for their current tasks to finish execution); 9) A final iput() is called against the btree inode; 10) This iput triggers a writeback of the btree inode because it still has dirty pages; 11) This starts the whole chain of callbacks for the btree inode until it eventually reaches btrfs_wq_submit_bio() where it leads to a NULL pointer dereference because the work queues were already destroyed. Fix this by making the cleaner commit any transaction that it started after the transaction kthread was stopped. 
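The fix below leans on a property of btrfs_attach_transaction() worth spelling out: unlike btrfs_start_transaction(), it never creates a transaction, it only joins one that is already running and returns ERR_PTR(-ENOENT) otherwise, which is exactly what a shutdown path wants. A condensed sketch (the wrapper name is invented):

static int example_commit_leftover(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = btrfs_attach_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans) == -ENOENT ? 0 : PTR_ERR(trans);

        /*
         * Commit whatever the cleaner started, e.g. from deleting unused
         * block groups, so no dirty btree pages survive into the final iput.
         */
        return btrfs_commit_transaction(trans, root);
}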
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b59deb2..e5aad7f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1751,6 +1751,7 @@ static int cleaner_kthread(void *arg) { struct btrfs_root *root = arg; int again; + struct btrfs_trans_handle *trans; do { again = 0; @@ -1798,6 +1799,34 @@ sleep: __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); + + /* + * Transaction kthread is stopped before us and wakes us up. + * However we might have started a new transaction and COWed some + * tree blocks when deleting unused block groups for example. So + * make sure we commit the transaction we started to have a clean + * shutdown when evicting the btree inode - if it has dirty pages + * when we do the final iput() on it, eviction will trigger a + * writeback for it which will fail with null pointer dereferences + * since work queues and other resources were already released and + * destroyed by the time the iput/eviction/writeback is made. + */ + trans = btrfs_attach_transaction(root); + if (IS_ERR(trans)) { + if (PTR_ERR(trans) != -ENOENT) + btrfs_err(root->fs_info, + "cleaner transaction attach returned %ld", + PTR_ERR(trans)); + } else { + int ret; + + ret = btrfs_commit_transaction(trans, root); + if (ret) + btrfs_err(root->fs_info, + "cleaner open transaction commit returned %d", + ret); + } + return 0; } -- cgit v0.10.2 From e4545de5b035c7debb73d260c78377dbb69cbfb5 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 17 Jun 2015 12:49:23 +0100 Subject: Btrfs: fix fsync data loss after append write If we do an append write to a file (which increases its inode's i_size) that does not have the flag BTRFS_INODE_NEEDS_FULL_SYNC set in its inode, and the previous transaction added a new hard link to the file, which sets the flag BTRFS_INODE_COPY_EVERYTHING in the file's inode, and then fsync the file, the inode's new i_size isn't logged. This has the consequence that after the fsync log is replayed, the file size remains what it was before the append write operation, which means users/applications will not be able to read the data that was successsfully fsync'ed before. This happens because neither the inode item nor the delayed inode get their i_size updated when the append write is made - doing so would require starting a transaction in the buffered write path, something that we do not do intentionally for performance reasons. Fix this by making sure that when the flag BTRFS_INODE_COPY_EVERYTHING is set the inode is logged with its current i_size (log the in-memory inode into the log tree). This issue is not a recent regression and is easy to reproduce with the following test case for fstests: seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" here=`pwd` tmp=/tmp/$$ status=1 # failure is the default! _cleanup() { _cleanup_flakey rm -f $tmp.* } trap "_cleanup; exit \$status" 0 1 2 3 15 # get standard environment, filters and checks . ./common/rc . ./common/filter . ./common/dmflakey # real QA test starts here _supported_fs generic _supported_os Linux _need_to_be_root _require_scratch _require_dm_flakey _require_metadata_journaling $SCRATCH_DEV _crash_and_mount() { # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey # Allow writes again and mount. This makes the fs replay its fsync log. 
_load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey } rm -f $seqres.full _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create the test file with some initial data and then fsync it. # The fsync here is only needed to trigger the issue in btrfs, as it causes the # the flag BTRFS_INODE_NEEDS_FULL_SYNC to be removed from the btrfs inode. $XFS_IO_PROG -f -c "pwrite -S 0xaa 0 32k" \ -c "fsync" \ $SCRATCH_MNT/foo | _filter_xfs_io sync # Add a hard link to our file. # On btrfs this sets the flag BTRFS_INODE_COPY_EVERYTHING on the btrfs inode, # which is a necessary condition to trigger the issue. ln $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Sync the filesystem to force a commit of the current btrfs transaction, this # is a necessary condition to trigger the bug on btrfs. sync # Now append more data to our file, increasing its size, and fsync the file. # In btrfs because the inode flag BTRFS_INODE_COPY_EVERYTHING was set and the # write path did not update the inode item in the btree nor the delayed inode # item (in memory struture) in the current transaction (created by the fsync # handler), the fsync did not record the inode's new i_size in the fsync # log/journal. This made the data unavailable after the fsync log/journal is # replayed. $XFS_IO_PROG -c "pwrite -S 0xbb 32K 32K" \ -c "fsync" \ $SCRATCH_MNT/foo | _filter_xfs_io echo "File content after fsync and before crash:" od -t x1 $SCRATCH_MNT/foo _crash_and_mount echo "File content after crash and log replay:" od -t x1 $SCRATCH_MNT/foo status=0 exit The expected file output before and after the crash/power failure expects the appended data to be available, which is: 0000000 aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa * 0100000 bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb * 0200000 Cc: stable@vger.kernel.org Signed-off-by: Filipe Manana Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 1ce80c1..76c4f9d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4155,6 +4155,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; u64 logged_isize = 0; + bool need_log_inode_item = true; path = btrfs_alloc_path(); if (!path) @@ -4263,11 +4264,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, } else { if (inode_only == LOG_INODE_ALL) fast_search = true; - ret = log_inode_item(trans, log, dst_path, inode); - if (ret) { - err = ret; - goto out_unlock; - } goto log_extents; } @@ -4290,6 +4286,9 @@ again: if (min_key.type > max_key.type) break; + if (min_key.type == BTRFS_INODE_ITEM_KEY) + need_log_inode_item = false; + src = path->nodes[0]; if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { ins_nr++; @@ -4360,6 +4359,11 @@ next_slot: log_extents: btrfs_release_path(path); btrfs_release_path(dst_path); + if (need_log_inode_item) { + err = log_inode_item(trans, log, dst_path, inode); + if (err) + goto out_unlock; + } if (fast_search) { /* * Some ordered extents started by fsync might have completed -- cgit v0.10.2 From 36283bf777d963fac099213297e155d071096994 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 20 Jun 2015 00:44:51 +0100 Subject: Btrfs: fix fsync xattr loss in the fast fsync path After commit 4f764e515361 ("Btrfs: remove deleted xattrs on fsync log replay"), we can end up in a situation where during log replay we end up deleting xattrs that were never deleted when their file was last fsynced. 
This happens in the fast fsync path (flag BTRFS_INODE_NEEDS_FULL_SYNC is not set in the inode) if the inode has the flag BTRFS_INODE_COPY_EVERYTHING set, the xattr was added in a past transaction and the leaf where the xattr is located was not updated (COWed or created) in the current transaction. In this scenario the xattr item never ends up in the log tree and therefore at log replay time, which makes the replay code delete the xattr from the fs/subvol tree as it thinks that xattr was deleted prior to the last fsync. Fix this by always logging all xattrs, which is the simplest and most reliable way to detect deleted xattrs and replay the deletes at log replay time. This issue is reproducible with the following test case for fstests: seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" here=`pwd` tmp=/tmp/$$ status=1 # failure is the default! _cleanup() { _cleanup_flakey rm -f $tmp.* } trap "_cleanup; exit \$status" 0 1 2 3 15 # get standard environment, filters and checks . ./common/rc . ./common/filter . ./common/dmflakey . ./common/attr # real QA test starts here # We create a lot of xattrs for a single file. Only btrfs and xfs are currently # able to store such a large mount of xattrs per file, other filesystems such # as ext3/4 and f2fs for example, fail with ENOSPC even if we attempt to add # less than 1000 xattrs with very small values. _supported_fs btrfs xfs _supported_os Linux _need_to_be_root _require_scratch _require_dm_flakey _require_attrs _require_metadata_journaling $SCRATCH_DEV rm -f $seqres.full _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create the test file with some initial data and make sure everything is # durably persisted. $XFS_IO_PROG -f -c "pwrite -S 0xaa 0 32k" $SCRATCH_MNT/foo | _filter_xfs_io sync # Add many small xattrs to our file. # We create such a large amount because it's needed to trigger the issue found # in btrfs - we need to have an amount that causes the fs to have at least 3 # btree leafs with xattrs stored in them, and it must work on any leaf size # (maximum leaf/node size is 64Kb). num_xattrs=2000 for ((i = 1; i <= $num_xattrs; i++)); do name="user.attr_$(printf "%04d" $i)" $SETFATTR_PROG -n $name -v "val_$(printf "%04d" $i)" $SCRATCH_MNT/foo done # Sync the filesystem to force a commit of the current btrfs transaction, this # is a necessary condition to trigger the bug on btrfs. sync # Now update our file's data and fsync the file. # After a successful fsync, if the fsync log/journal is replayed we expect to # see all the xattrs we added before with the same values (and the updated file # data of course). Btrfs used to delete some of these xattrs when it replayed # its fsync log/journal. $XFS_IO_PROG -c "pwrite -S 0xbb 8K 16K" \ -c "fsync" \ $SCRATCH_MNT/foo | _filter_xfs_io # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey # Allow writes again and mount. This makes the fs replay its fsync log. _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey echo "File content after crash and log replay:" od -t x1 $SCRATCH_MNT/foo echo "File xattrs after crash and log replay:" for ((i = 1; i <= $num_xattrs; i++)); do name="user.attr_$(printf "%04d" $i)" echo -n "$name=" $GETFATTR_PROG --absolute-names -n $name --only-values $SCRATCH_MNT/foo echo done status=0 exit The golden output expects all xattrs to be available, and with the correct values, after the fsync log is replayed. 
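A compact sketch of the replay rule (from 4f764e515361) that makes logging every xattr necessary; all names are invented placeholders, not the real replay code. Anything present in the fs/subvol tree with no counterpart in the log is treated as deleted before the last fsync and removed:

static void example_replay_xattr_deletes(struct example_fs_tree *fs,
                                         struct example_log_tree *log,
                                         u64 ino)
{
        struct example_xattr *x;

        example_for_each_xattr(x, fs, ino) {
                if (!example_xattr_in_log(log, ino, x->name))
                        example_delete_xattr(fs, ino, x->name);
        }
}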
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 76c4f9d..66f8715 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4117,6 +4117,86 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode, return 0; } +/* + * At the moment we always log all xattrs. This is to figure out at log replay + * time which xattrs must have their deletion replayed. If a xattr is missing + * in the log tree and exists in the fs/subvol tree, we delete it. This is + * because if a xattr is deleted, the inode is fsynced and a power failure + * happens, causing the log to be replayed the next time the fs is mounted, + * we want the xattr to not exist anymore (same behaviour as other filesystems + * with a journal, ext3/4, xfs, f2fs, etc). + */ +static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, + struct btrfs_path *path, + struct btrfs_path *dst_path) +{ + int ret; + struct btrfs_key key; + const u64 ino = btrfs_ino(inode); + int ins_nr = 0; + int start_slot = 0; + + key.objectid = ino; + key.type = BTRFS_XATTR_ITEM_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ret; + + while (true) { + int slot = path->slots[0]; + struct extent_buffer *leaf = path->nodes[0]; + int nritems = btrfs_header_nritems(leaf); + + if (slot >= nritems) { + if (ins_nr > 0) { + u64 last_extent = 0; + + ret = copy_items(trans, inode, dst_path, path, + &last_extent, start_slot, + ins_nr, 1, 0); + /* can't be 1, extent items aren't processed */ + ASSERT(ret <= 0); + if (ret < 0) + return ret; + ins_nr = 0; + } + ret = btrfs_next_leaf(root, path); + if (ret < 0) + return ret; + else if (ret > 0) + break; + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) + break; + + if (ins_nr == 0) + start_slot = slot; + ins_nr++; + path->slots[0]++; + cond_resched(); + } + if (ins_nr > 0) { + u64 last_extent = 0; + + ret = copy_items(trans, inode, dst_path, path, + &last_extent, start_slot, + ins_nr, 1, 0); + /* can't be 1, extent items aren't processed */ + ASSERT(ret <= 0); + if (ret < 0) + return ret; + } + + return 0; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. 
@@ -4289,6 +4369,25 @@ again: if (min_key.type == BTRFS_INODE_ITEM_KEY) need_log_inode_item = false; + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ + if (min_key.type == BTRFS_XATTR_ITEM_KEY) { + if (ins_nr == 0) + goto next_slot; + ret = copy_items(trans, inode, dst_path, path, + &last_extent, ins_start_slot, + ins_nr, inode_only, logged_isize); + if (ret < 0) { + err = ret; + goto out_unlock; + } + ins_nr = 0; + if (ret) { + btrfs_release_path(path); + continue; + } + goto next_slot; + } + src = path->nodes[0]; if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { ins_nr++; @@ -4356,6 +4455,11 @@ next_slot: ins_nr = 0; } + btrfs_release_path(path); + btrfs_release_path(dst_path); + err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); + if (err) + goto out_unlock; log_extents: btrfs_release_path(path); btrfs_release_path(dst_path); -- cgit v0.10.2 From db490cb9aeb77b392f4bdaad3ebe96ea1d5c7bfe Mon Sep 17 00:00:00 2001 From: Ingo Tuchscherer Date: Tue, 17 Mar 2015 16:02:20 +0100 Subject: s390/zcrypt: enable s390 hwrng to seed kernel entropy Set the 'quality' property in the zcrypt rng device structure to enable the zcrypt hwrng device to take part in the kernel entropy seeding process. A module parameter named hwrng_seed will be introduced to disable the participation. By default this parameter is set to 1 (enabled). Signed-off-by: Ingo Tuchscherer Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 08f1830..01bf1f5 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); +static int zcrypt_hwrng_seed = 1; +module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); +MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); + static DEFINE_SPINLOCK(zcrypt_device_lock); static LIST_HEAD(zcrypt_device_list); static int zcrypt_device_count = 0; @@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) static struct hwrng zcrypt_rng_dev = { .name = "zcrypt", .data_read = zcrypt_rng_data_read, + .quality = 990, }; static int zcrypt_rng_device_add(void) @@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void) goto out; } zcrypt_rng_buffer_index = 0; + if (!zcrypt_hwrng_seed) + zcrypt_rng_dev.quality = 0; rc = hwrng_register(&zcrypt_rng_dev); if (rc) goto out_free; -- cgit v0.10.2 From a313bdc5310dd807655d3ca3eb2219cd65dfe45a Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 25 Jun 2015 09:32:22 +0200 Subject: s390/sclp: fix compile error Fix this error when compiling with CONFIG_SMP=n and CONFIG_DYNAMIC_DEBUG=y: drivers/s390/char/sclp_early.c: In function 'sclp_read_info_early': drivers/s390/char/sclp_early.c:87:19: error: 'EBUSY' undeclared (first use in this function) } while (rc == -EBUSY); ^ Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index aeed796..7bc6df3 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -7,6 +7,7 @@ #define KMSG_COMPONENT "sclp_early" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include -- cgit v0.10.2 From a215c8fbde8600d133f122691d0f8c30de6dda02 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 25 Jun 2015 14:52:13 +0200 Subject: 
s390/oprofile: fix compile error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these errors when compiling with CONFIG_OPROFILE=y and CONFIG_PERF_EVENTS=n: arch/s390/oprofile/init.c: In function ‘oprofile_hwsampler_start’: arch/s390/oprofile/init.c:93:2: error: implicit declaration of function 'perf_reserve_sampling' [-Werror=implicit-function-declaration] retval = perf_reserve_sampling(); ^ arch/s390/oprofile/init.c:99:3: error: implicit declaration of function 'perf_release_sampling' [-Werror=implicit-function-declaration] perf_release_sampling(); ^ Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 4cb19fe..f897ec7 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -87,7 +87,15 @@ struct sf_raw_sample { } __packed; /* Perf hardware reserve and release functions */ +#ifdef CONFIG_PERF_EVENTS int perf_reserve_sampling(void); void perf_release_sampling(void); +#else /* CONFIG_PERF_EVENTS */ +static inline int perf_reserve_sampling(void) +{ + return 0; +} +static inline void perf_release_sampling(void) {} +#endif /* CONFIG_PERF_EVENTS */ #endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index bc927a0..9cfa2ff 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "../../../drivers/oprofile/oprof.h" -- cgit v0.10.2 From 0689a86ae814f39af94a9736a0a5426dd82eb107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominic=20Sacr=C3=A9?= Date: Tue, 30 Jun 2015 17:41:33 +0200 Subject: ALSA: usb-audio: Add MIDI support for Steinberg MI2/MI4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Steinberg MI2 and MI4 interfaces are compatible with the USB class audio spec, but the MIDI part of the devices is reported as a vendor specific interface. This patch adds entries to quirks-table.h to recognize the MIDI endpoints. Audio functionality was already working and is unaffected by this change. 
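In the quirk entries below, out_cables and in_cables are bitmasks with one bit per MIDI "cable" (virtual port) multiplexed on the endpoint, so 0x0001 declares a single port in each direction. Roughly (example_midi_ep is an invented name, not one of the new table entries):

static const struct snd_usb_midi_endpoint_info example_midi_ep = {
        .out_cables = 0x0001,   /* bit 0 set: one MIDI output port */
        .in_cables  = 0x0001,   /* bit 0 set: one MIDI input port */
};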
Signed-off-by: Dominic Sacré Signed-off-by: Albert Huitsing Acked-by: Clemens Ladisch Cc: Signed-off-by: Takashi Iwai diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 2f6d3e9..e475665 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, +/* Steinberg devices */ +{ + /* Steinberg MI2 */ + USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = & (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 3, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = &(const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + }, + { + .ifnum = -1 + } + } + } +}, +{ + /* Steinberg MI4 */ + USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = & (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 3, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = &(const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + }, + { + .ifnum = -1 + } + } + } +}, + /* TerraTec devices */ { USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), -- cgit v0.10.2 From a89ca6f24ffe435edad57de02eaabd37a2c6bff6 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 25 Jun 2015 04:17:46 +0100 Subject: Btrfs: fix fsync after truncate when no_holes feature is enabled When we have the no_holes feature enabled, if a we truncate a file to a smaller size, truncate it again but to a size greater than or equals to its original size and fsync it, the log tree will not have any information about the hole covering the range [truncate_1_offset, new_file_size[. Which means if the fsync log is replayed, the file will remain with the state it had before both truncate operations. Without the no_holes feature this does not happen, since when the inode is logged (full sync flag is set) it will find in the fs/subvol tree a leaf with a generation matching the current transaction id that has an explicit extent item representing the hole. Fix this by adding an explicit extent item representing a hole between the last extent and the inode's i_size if we are doing a full sync. The issue is easy to reproduce with the following test case for fstests: . ./common/rc . ./common/filter . ./common/dmflakey _need_to_be_root _supported_fs generic _supported_os Linux _require_scratch _require_dm_flakey # This test was motivated by an issue found in btrfs when the btrfs # no-holes feature is enabled (introduced in kernel 3.14). So enable # the feature if the fs being tested is btrfs. if [ $FSTYP == "btrfs" ]; then _require_btrfs_fs_feature "no_holes" _require_btrfs_mkfs_feature "no-holes" MKFS_OPTIONS="$MKFS_OPTIONS -O no-holes" fi rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _init_flakey _mount_flakey # Create our test files and make sure everything is durably persisted. 
$XFS_IO_PROG -f -c "pwrite -S 0xaa 0 64K" \ -c "pwrite -S 0xbb 64K 61K" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xee 0 64K" \ -c "pwrite -S 0xff 64K 61K" \ $SCRATCH_MNT/bar | _filter_xfs_io sync # Now truncate our file foo to a smaller size (64Kb) and then truncate # it to the size it had before the shrinking truncate (125Kb). Then # fsync our file. If a power failure happens after the fsync, we expect # our file to have a size of 125Kb, with the first 64Kb of data having # the value 0xaa and the second 61Kb of data having the value 0x00. $XFS_IO_PROG -c "truncate 64K" \ -c "truncate 125K" \ -c "fsync" \ $SCRATCH_MNT/foo # Do something similar to our file bar, but the first truncation sets # the file size to 0 and the second truncation expands the size to the # double of what it was initially. $XFS_IO_PROG -c "truncate 0" \ -c "truncate 253K" \ -c "fsync" \ $SCRATCH_MNT/bar _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey # Allow writes again, mount to trigger log replay and validate file # contents. _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # We expect foo to have a size of 125Kb, the first 64Kb of data all # having the value 0xaa and the remaining 61Kb to be a hole (all bytes # with value 0x00). echo "File foo content after log replay:" od -t x1 $SCRATCH_MNT/foo # We expect bar to have a size of 253Kb and no extents (any byte read # from bar has the value 0x00). echo "File bar content after log replay:" od -t x1 $SCRATCH_MNT/bar status=0 exit The expected file contents in the golden output are: File foo content after log replay: 0000000 aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa * 0200000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 0372000 File bar content after log replay: 0000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 0772000 Without this fix, their contents are: File foo content after log replay: 0000000 aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa * 0200000 bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb * 0372000 File bar content after log replay: 0000000 ee ee ee ee ee ee ee ee ee ee ee ee ee ee ee ee * 0200000 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff * 0372000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 0772000 A test case submission for fstests follows soon. Signed-off-by: Filipe Manana Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 66f8715..9c45431 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4197,6 +4197,107 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, return 0; } +/* + * If the no holes feature is enabled we need to make sure any hole between the + * last extent and the i_size of our inode is explicitly marked in the log. This + * is to make sure that doing something like: + * + * 1) create file with 128Kb of data + * 2) truncate file to 64Kb + * 3) truncate file to 256Kb + * 4) fsync file + * 5) + * 6) mount fs and trigger log replay + * + * Will give us a file with a size of 256Kb, the first 64Kb of data match what + * the file had in its first 64Kb of data at step 1 and the last 192Kb of the + * file correspond to a hole. The presence of explicit holes in a log tree is + * what guarantees that log replay will remove/adjust file extent items in the + * fs/subvol tree. + * + * Here we do not need to care about holes between extents, that is already done + * by copy_items(). We also only need to do this in the full sync path, where we + * lookup for extents from the fs/subvol tree only. 
In the fast path case, we + * lookup the list of modified extent maps and if any represents a hole, we + * insert a corresponding extent representing a hole in the log tree. + */ +static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, + struct btrfs_path *path) +{ + int ret; + struct btrfs_key key; + u64 hole_start; + u64 hole_size; + struct extent_buffer *leaf; + struct btrfs_root *log = root->log_root; + const u64 ino = btrfs_ino(inode); + const u64 i_size = i_size_read(inode); + + if (!btrfs_fs_incompat(root->fs_info, NO_HOLES)) + return 0; + + key.objectid = ino; + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = (u64)-1; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + ASSERT(ret != 0); + if (ret < 0) + return ret; + + ASSERT(path->slots[0] > 0); + path->slots[0]--; + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + + if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { + /* inode does not have any extents */ + hole_start = 0; + hole_size = i_size; + } else { + struct btrfs_file_extent_item *extent; + u64 len; + + /* + * If there's an extent beyond i_size, an explicit hole was + * already inserted by copy_items(). + */ + if (key.offset >= i_size) + return 0; + + extent = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + + if (btrfs_file_extent_type(leaf, extent) == + BTRFS_FILE_EXTENT_INLINE) { + len = btrfs_file_extent_inline_len(leaf, + path->slots[0], + extent); + ASSERT(len == i_size); + return 0; + } + + len = btrfs_file_extent_num_bytes(leaf, extent); + /* Last extent goes beyond i_size, no need to log a hole. */ + if (key.offset + len > i_size) + return 0; + hole_start = key.offset + len; + hole_size = i_size - hole_start; + } + btrfs_release_path(path); + + /* Last extent ends at i_size. */ + if (hole_size == 0) + return 0; + + hole_size = ALIGN(hole_size, root->sectorsize); + ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, + hole_size, 0, hole_size, 0, 0, 0); + return ret; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. @@ -4460,6 +4561,13 @@ next_slot: err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); if (err) goto out_unlock; + if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { + btrfs_release_path(path); + btrfs_release_path(dst_path); + err = btrfs_log_trailing_hole(trans, root, inode, path); + if (err) + goto out_unlock; + } log_extents: btrfs_release_path(path); btrfs_release_path(dst_path); -- cgit v0.10.2 From 207910ddeeda38fd54544d94f8c8ca5a9632cc25 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 30 Jun 2015 14:42:04 -0700 Subject: btrfs: pass unaligned length to btrfs_cmp_data() In the case that we dedupe the tail of a file, we might expand the dedupe len out to the end of our last block. We don't want to compare data past i_size however, so pass the original length to btrfs_cmp_data(). 
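As an illustration of the arithmetic, here is a small stand-alone sketch (not btrfs code; the constants and the ALIGN_UP helper are made up) of why the block-aligned length must not be used for the data comparison: rounding a tail-of-file request up to the block size pushes the compared range past i_size, while the original length ends exactly at it.

    /* Stand-alone sketch of the off-by-a-block problem, illustrative values only. */
    #include <stdio.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096ULL
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            uint64_t i_size = 6000;                       /* file size in bytes             */
            uint64_t loff   = 4096;                       /* dedupe request covers the tail */
            uint64_t olen   = i_size - loff;              /* 1904 bytes, as the user asked  */
            uint64_t len    = ALIGN_UP(olen, BLOCK_SIZE); /* 4096, what the clone works on  */

            printf("compare olen bytes: ends at %llu (exactly i_size)\n",
                   (unsigned long long)(loff + olen));
            printf("compare len bytes : ends at %llu (%llu bytes past i_size)\n",
                   (unsigned long long)(loff + len),
                   (unsigned long long)(loff + len - i_size));
            return 0;
    }

The patch below therefore keeps the aligned len for btrfs_clone() but passes olen to btrfs_cmp_data().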
Signed-off-by: Mark Fasheh Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c86b835..5550433 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2943,7 +2943,8 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, goto out_unlock; } - ret = btrfs_cmp_data(src, loff, dst, dst_loff, len); + /* pass original length for comparison so we stay within i_size */ + ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen); if (ret == 0) ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); -- cgit v0.10.2 From f441460202cb787c49963bcc1f54cb48c52f7512 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 30 Jun 2015 14:42:05 -0700 Subject: btrfs: fix deadlock with extent-same and readpage ->readpage() does page_lock() before extent_lock(), we do the opposite in extent-same. We want to reverse the order in btrfs_extent_same() but it's not quite straightforward since the page locks are taken inside btrfs_cmp_data(). So I split btrfs_cmp_data() into 3 parts with a small context structure that is passed between them. The first, btrfs_cmp_data_prepare() gathers up the pages needed (taking page lock as required) and puts them on our context structure. At this point, we are safe to lock the extent range. Afterwards, we use btrfs_cmp_data() to do the data compare as usual and btrfs_cmp_data_free() to clean up our context. Signed-off-by: Mark Fasheh Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5550433..9ebe2dd 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2765,14 +2765,11 @@ out: return ret; } -static struct page *extent_same_get_page(struct inode *inode, u64 off) +static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) { struct page *page; - pgoff_t index; struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; - index = off >> PAGE_CACHE_SHIFT; - page = grab_cache_page(inode->i_mapping, index); if (!page) return NULL; @@ -2793,6 +2790,20 @@ static struct page *extent_same_get_page(struct inode *inode, u64 off) return page; } +static int gather_extent_pages(struct inode *inode, struct page **pages, + int num_pages, u64 off) +{ + int i; + pgoff_t index = off >> PAGE_CACHE_SHIFT; + + for (i = 0; i < num_pages; i++) { + pages[i] = extent_same_get_page(inode, index + i); + if (!pages[i]) + return -ENOMEM; + } + return 0; +} + static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) { /* do any pending delalloc/csum calc on src, one way or @@ -2818,52 +2829,120 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) } } -static void btrfs_double_unlock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) +static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2) { - unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); - mutex_unlock(&inode1->i_mutex); mutex_unlock(&inode2->i_mutex); } -static void btrfs_double_lock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) +static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2) +{ + if (inode1 < inode2) + swap(inode1, inode2); + + mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); + if (inode1 != inode2) + mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); +} + +static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) +{ 
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); + unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); +} + +static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) { if (inode1 < inode2) { swap(inode1, inode2); swap(loff1, loff2); } - - mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); lock_extent_range(inode1, loff1, len); - if (inode1 != inode2) { - mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); + if (inode1 != inode2) lock_extent_range(inode2, loff2, len); +} + +struct cmp_pages { + int num_pages; + struct page **src_pages; + struct page **dst_pages; +}; + +static void btrfs_cmp_data_free(struct cmp_pages *cmp) +{ + int i; + struct page *pg; + + for (i = 0; i < cmp->num_pages; i++) { + pg = cmp->src_pages[i]; + if (pg) + page_cache_release(pg); + pg = cmp->dst_pages[i]; + if (pg) + page_cache_release(pg); + } + kfree(cmp->src_pages); + kfree(cmp->dst_pages); +} + +static int btrfs_cmp_data_prepare(struct inode *src, u64 loff, + struct inode *dst, u64 dst_loff, + u64 len, struct cmp_pages *cmp) +{ + int ret; + int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; + struct page **src_pgarr, **dst_pgarr; + + /* + * We must gather up all the pages before we initiate our + * extent locking. We use an array for the page pointers. Size + * of the array is bounded by len, which is in turn bounded by + * BTRFS_MAX_DEDUPE_LEN. + */ + src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS); + dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS); + if (!src_pgarr || !dst_pgarr) { + kfree(src_pgarr); + kfree(dst_pgarr); + return -ENOMEM; } + cmp->num_pages = num_pages; + cmp->src_pages = src_pgarr; + cmp->dst_pages = dst_pgarr; + + ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff); + if (ret) + goto out; + + ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff); + +out: + if (ret) + btrfs_cmp_data_free(cmp); + return 0; } static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, - u64 dst_loff, u64 len) + u64 dst_loff, u64 len, struct cmp_pages *cmp) { int ret = 0; + int i; struct page *src_page, *dst_page; unsigned int cmp_len = PAGE_CACHE_SIZE; void *addr, *dst_addr; + i = 0; while (len) { if (len < PAGE_CACHE_SIZE) cmp_len = len; - src_page = extent_same_get_page(src, loff); - if (!src_page) - return -EINVAL; - dst_page = extent_same_get_page(dst, dst_loff); - if (!dst_page) { - page_cache_release(src_page); - return -EINVAL; - } + BUG_ON(i >= cmp->num_pages); + + src_page = cmp->src_pages[i]; + dst_page = cmp->dst_pages[i]; + addr = kmap_atomic(src_page); dst_addr = kmap_atomic(dst_page); @@ -2875,15 +2954,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, kunmap_atomic(addr); kunmap_atomic(dst_addr); - page_cache_release(src_page); - page_cache_release(dst_page); if (ret) break; - loff += cmp_len; - dst_loff += cmp_len; len -= cmp_len; + i++; } return ret; @@ -2914,6 +2990,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, { int ret; u64 len = olen; + struct cmp_pages cmp; /* * btrfs_clone() can't handle extents in the same file @@ -2926,7 +3003,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, if (len == 0) return 0; - btrfs_double_lock(src, loff, dst, dst_loff, len); + btrfs_double_inode_lock(src, dst); ret = extent_same_check_offsets(src, loff, &len, olen); if (ret) @@ -2943,13 +3020,22 @@ static int btrfs_extent_same(struct inode *src, u64 
loff, u64 olen, goto out_unlock; } + ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp); + if (ret) + goto out_unlock; + + btrfs_double_extent_lock(src, loff, dst, dst_loff, len); + /* pass original length for comparison so we stay within i_size */ - ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen); + ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); if (ret == 0) ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); + btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); + + btrfs_cmp_data_free(&cmp); out_unlock: - btrfs_double_unlock(src, loff, dst, dst_loff, len); + btrfs_double_inode_unlock(src, dst); return ret; } -- cgit v0.10.2 From 0efa9f48c7e6c15e75946dd2b1c82d3d19e13545 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 30 Jun 2015 14:42:07 -0700 Subject: btrfs: allow dedupe of same inode clone() supports cloning within an inode so extent-same can do the same now. This patch fixes up the locking in extent-same to know about the single-inode case. In addition to that, we add a check for overlapping ranges, which clone does not allow. Signed-off-by: Mark Fasheh Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9ebe2dd..af06494 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2991,27 +2991,61 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, int ret; u64 len = olen; struct cmp_pages cmp; + int same_inode = 0; + u64 same_lock_start = 0; + u64 same_lock_len = 0; - /* - * btrfs_clone() can't handle extents in the same file - * yet. Once that works, we can drop this check and replace it - * with a check for the same inode, but overlapping extents. - */ if (src == dst) - return -EINVAL; + same_inode = 1; if (len == 0) return 0; - btrfs_double_inode_lock(src, dst); + if (same_inode) { + mutex_lock(&src->i_mutex); - ret = extent_same_check_offsets(src, loff, &len, olen); - if (ret) - goto out_unlock; + ret = extent_same_check_offsets(src, loff, &len, olen); + if (ret) + goto out_unlock; - ret = extent_same_check_offsets(dst, dst_loff, &len, olen); - if (ret) - goto out_unlock; + /* + * Single inode case wants the same checks, except we + * don't want our length pushed out past i_size as + * comparing that data range makes no sense. + * + * extent_same_check_offsets() will do this for an + * unaligned length at i_size, so catch it here and + * reject the request. + * + * This effectively means we require aligned extents + * for the single-inode case, whereas the other cases + * allow an unaligned length so long as it ends at + * i_size. 
+ */ + if (len != olen) { + ret = -EINVAL; + goto out_unlock; + } + + /* Check for overlapping ranges */ + if (dst_loff + len > loff && dst_loff < loff + len) { + ret = -EINVAL; + goto out_unlock; + } + + same_lock_start = min_t(u64, loff, dst_loff); + same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; + } else { + btrfs_double_inode_lock(src, dst); + + ret = extent_same_check_offsets(src, loff, &len, olen); + if (ret) + goto out_unlock; + + ret = extent_same_check_offsets(dst, dst_loff, &len, olen); + if (ret) + goto out_unlock; + } /* don't make the dst file partly checksummed */ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != @@ -3024,18 +3058,28 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, if (ret) goto out_unlock; - btrfs_double_extent_lock(src, loff, dst, dst_loff, len); + if (same_inode) + lock_extent_range(src, same_lock_start, same_lock_len); + else + btrfs_double_extent_lock(src, loff, dst, dst_loff, len); /* pass original length for comparison so we stay within i_size */ ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); if (ret == 0) ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); - btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); + if (same_inode) + unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start, + same_lock_start + same_lock_len - 1); + else + btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); btrfs_cmp_data_free(&cmp); out_unlock: - btrfs_double_inode_unlock(src, dst); + if (same_inode) + mutex_unlock(&src->i_mutex); + else + btrfs_double_inode_unlock(src, dst); return ret; } -- cgit v0.10.2 From 1c919a5e13702caffbe2d2c7c305f9d0d2925160 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 30 Jun 2015 14:42:08 -0700 Subject: btrfs: don't update mtime/ctime on deduped inodes One issue users have reported is that dedupe changes mtime on files, resulting in tools like rsync thinking that their contents have changed when in fact the data is exactly the same. We also skip the ctime update as no user-visible metadata changes here and we want dedupe to be transparent to the user. Clone still wants time changes, so we special case this in the code. This was tested with the btrfs-extent-same tool. Signed-off-by: Mark Fasheh Signed-off-by: Chris Mason diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index af06494..5d91776 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -87,7 +87,8 @@ struct btrfs_ioctl_received_subvol_args_32 { static int btrfs_clone(struct inode *src, struct inode *inode, - u64 off, u64 olen, u64 olen_aligned, u64 destoff); + u64 off, u64 olen, u64 olen_aligned, u64 destoff, + int no_time_update); /* Mask out flags that are inappropriate for the given type of inode. 
*/ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -3066,7 +3067,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, /* pass original length for comparison so we stay within i_size */ ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); if (ret == 0) - ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); + ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); if (same_inode) unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start, @@ -3231,13 +3232,15 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, struct inode *inode, u64 endoff, const u64 destoff, - const u64 olen) + const u64 olen, + int no_time_update) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; inode_inc_iversion(inode); - inode->i_mtime = inode->i_ctime = CURRENT_TIME; + if (!no_time_update) + inode->i_mtime = inode->i_ctime = CURRENT_TIME; /* * We round up to the block size at eof when determining which * extents to clone above, but shouldn't round up the file size. @@ -3322,13 +3325,13 @@ static void clone_update_extent_map(struct inode *inode, * @inode: Inode to clone to * @off: Offset within source to start clone from * @olen: Original length, passed by user, of range to clone - * @olen_aligned: Block-aligned value of olen, extent_same uses - * identical values here + * @olen_aligned: Block-aligned value of olen * @destoff: Offset within @inode to start clone + * @no_time_update: Whether to update mtime/ctime on the target inode */ static int btrfs_clone(struct inode *src, struct inode *inode, const u64 off, const u64 olen, const u64 olen_aligned, - const u64 destoff) + const u64 destoff, int no_time_update) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path = NULL; @@ -3652,7 +3655,8 @@ process_slot: root->sectorsize); ret = clone_finish_inode_update(trans, inode, last_dest_end, - destoff, olen); + destoff, olen, + no_time_update); if (ret) goto out; if (new_key.offset + datal >= destoff + len) @@ -3690,7 +3694,7 @@ process_slot: clone_update_extent_map(inode, trans, NULL, last_dest_end, destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, - destoff, olen); + destoff, olen, no_time_update); } out: @@ -3827,7 +3831,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, lock_extent_range(inode, destoff, len); } - ret = btrfs_clone(src, inode, off, olen, len, destoff); + ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); if (same_inode) { u64 lock_start = min_t(u64, off, destoff); -- cgit v0.10.2 From 61de718fceb6bc028dafe4d06a1f87a9e0998303 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 1 Jul 2015 12:13:10 +0100 Subject: Btrfs: fix memory corruption on failure to submit bio for direct IO If we fail to submit a bio for a direct IO request, we were grabbing the corresponding ordered extent and decrementing its reference count twice, once for our lookup reference and once for the ordered tree reference. This was a problem because it caused the ordered extent to be freed without removing it from the ordered tree and any lists it might be attached to, leaving dangling pointers to the ordered extent around. Example trace with CONFIG_DEBUG_PAGEALLOC=y: [161779.858707] BUG: unable to handle kernel paging request at 0000000087654330 [161779.859983] IP: [] rb_prev+0x22/0x3b [161779.860636] PGD 34d818067 PUD 0 [161779.860636] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC (...) 
[161779.860636] Call Trace: [161779.860636] [] __tree_search+0xd9/0xf9 [btrfs] [161779.860636] [] tree_search+0x42/0x63 [btrfs] [161779.860636] [] ? btrfs_lookup_ordered_range+0x2d/0xa5 [btrfs] [161779.860636] [] btrfs_lookup_ordered_range+0x38/0xa5 [btrfs] [161779.860636] [] btrfs_get_blocks_direct+0x11b/0x615 [btrfs] [161779.860636] [] do_blockdev_direct_IO+0x5ff/0xb43 [161779.860636] [] ? btrfs_page_exists_in_range+0x1ad/0x1ad [btrfs] [161779.860636] [] ? btrfs_get_extent_fiemap+0x1bc/0x1bc [btrfs] [161779.860636] [] __blockdev_direct_IO+0x32/0x34 [161779.860636] [] ? btrfs_get_extent_fiemap+0x1bc/0x1bc [btrfs] [161779.860636] [] btrfs_direct_IO+0x198/0x21f [btrfs] [161779.860636] [] ? btrfs_get_extent_fiemap+0x1bc/0x1bc [btrfs] [161779.860636] [] generic_file_direct_write+0xb3/0x128 [161779.860636] [] ? btrfs_file_write_iter+0x15f/0x3e0 [btrfs] [161779.860636] [] btrfs_file_write_iter+0x201/0x3e0 [btrfs] (...) We were also not freeing the btrfs_dio_private we allocated previously, which kmemleak reported with the following trace in its sysfs file: unreferenced object 0xffff8803f553bf80 (size 96): comm "xfs_io", pid 4501, jiffies 4295039588 (age 173.936s) hex dump (first 32 bytes): 88 6c 9b f5 02 88 ff ff 00 00 00 00 00 00 00 00 .l.............. 00 00 00 00 00 00 00 00 00 00 c4 00 00 00 00 00 ................ backtrace: [] create_object+0x172/0x29a [] kmemleak_alloc+0x25/0x41 [] kmemleak_alloc_recursive.constprop.40+0x16/0x18 [] kmem_cache_alloc_trace+0xfb/0x148 [] btrfs_submit_direct+0x65/0x16a [btrfs] [] dio_bio_submit+0x62/0x8f [] do_blockdev_direct_IO+0x97e/0xb43 [] __blockdev_direct_IO+0x32/0x34 [] btrfs_direct_IO+0x198/0x21f [btrfs] [] generic_file_direct_write+0xb3/0x128 [] btrfs_file_write_iter+0x201/0x3e0 [btrfs] [] __vfs_write+0x7c/0xa5 [] vfs_write+0xa0/0xe4 [] SyS_pwrite64+0x64/0x82 [] system_call_fastpath+0x12/0x6f [] 0xffffffffffffffff For read requests we weren't doing any cleanup either (none of the work done by btrfs_endio_direct_read()), so a failure submitting a bio for a read request would leave a range in the inode's io_tree locked forever, blocking any future operations (both reads and writes) against that range. So fix this by making sure we do the same cleanup that we do for the case where the bio submission succeeds. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 855935f..c0b2b6b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8163,9 +8163,8 @@ out_err: static void btrfs_submit_direct(int rw, struct bio *dio_bio, struct inode *inode, loff_t file_offset) { - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_dio_private *dip; - struct bio *io_bio; + struct btrfs_dio_private *dip = NULL; + struct bio *io_bio = NULL; struct btrfs_io_bio *btrfs_bio; int skip_sum; int write = rw & REQ_WRITE; @@ -8182,7 +8181,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, dip = kzalloc(sizeof(*dip), GFP_NOFS); if (!dip) { ret = -ENOMEM; - goto free_io_bio; + goto free_ordered; } dip->private = dio_bio->bi_private; @@ -8210,25 +8209,55 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, if (btrfs_bio->end_io) btrfs_bio->end_io(btrfs_bio, ret); -free_io_bio: - bio_put(io_bio); free_ordered: /* - * If this is a write, we need to clean up the reserved space and kill - * the ordered extent. + * If we arrived here it means either we failed to submit the dip + * or we either failed to clone the dio_bio or failed to allocate the + * dip. 
If we cloned the dio_bio and allocated the dip, we can just + * call bio_endio against our io_bio so that we get proper resource + * cleanup if we fail to submit the dip, otherwise, we must do the + * same as btrfs_endio_direct_[write|read] because we can't call these + * callbacks - they require an allocated dip and a clone of dio_bio. */ - if (write) { - struct btrfs_ordered_extent *ordered; - ordered = btrfs_lookup_ordered_extent(inode, file_offset); - if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && - !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) - btrfs_free_reserved_extent(root, ordered->start, - ordered->disk_len, 1); - btrfs_put_ordered_extent(ordered); - btrfs_put_ordered_extent(ordered); + if (io_bio && dip) { + bio_endio(io_bio, ret); + /* + * The end io callbacks free our dip, do the final put on io_bio + * and all the cleanup and final put for dio_bio (through + * dio_end_io()). + */ + dip = NULL; + io_bio = NULL; + } else { + if (write) { + struct btrfs_ordered_extent *ordered; + + ordered = btrfs_lookup_ordered_extent(inode, + file_offset); + set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); + /* + * Decrements our ref on the ordered extent and removes + * the ordered extent from the inode's ordered tree, + * doing all the proper resource cleanup such as for the + * reserved space and waking up any waiters for this + * ordered extent (through btrfs_remove_ordered_extent). + */ + btrfs_finish_ordered_io(ordered); + } else { + unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, + file_offset + dio_bio->bi_iter.bi_size - 1); + } + clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); + /* + * Releases and cleans up our dio_bio, no need to bio_put() + * nor bio_endio()/bio_io_error() against dio_bio. + */ + dio_end_io(dio_bio, ret); } - bio_endio(dio_bio, ret); + if (io_bio) + bio_put(io_bio); + kfree(dip); } static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 89656d7..52170cf 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -552,6 +552,10 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) trace_btrfs_ordered_extent_put(entry->inode, entry); if (atomic_dec_and_test(&entry->refs)) { + ASSERT(list_empty(&entry->log_list)); + ASSERT(list_empty(&entry->trans_list)); + ASSERT(list_empty(&entry->root_extent_list)); + ASSERT(RB_EMPTY_NODE(&entry->rb_node)); if (entry->inode) btrfs_add_delayed_iput(entry->inode); while (!list_empty(&entry->list)) { @@ -579,6 +583,7 @@ void btrfs_remove_ordered_extent(struct inode *inode, spin_lock_irq(&tree->lock); node = &entry->rb_node; rb_erase(node, &tree->tree); + RB_CLEAR_NODE(node); if (tree->last == node) tree->last = NULL; set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); -- cgit v0.10.2 From 9c6429d96daec64f6b5b10a1c6b02c7264541ea1 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 10 Jun 2015 12:55:41 +0100 Subject: Btrfs: fix a comment in inode.c:evict_inode_truncate_pages() The comment was not correct about the part where it says the endio callback of the bio might have not yet been called - update it to mention that by that time the endio callback execution might still be in progress only. 
Signed-off-by: Filipe Manana Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c0b2b6b..53afda0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4989,8 +4989,9 @@ static void evict_inode_truncate_pages(struct inode *inode) /* * Keep looping until we have no more ranges in the io tree. * We can have ongoing bios started by readpages (called from readahead) - * that didn't get their end io callbacks called yet or they are still - * in progress ((extent_io.c:end_bio_extent_readpage()). This means some + * that have their endio callback (extent_io.c:end_bio_extent_readpage) + * still in progress (unlocked the pages in the bio but did not yet + * unlocked the ranges in the io tree). Therefore this means some * ranges can still be locked and eviction started because before * submitting those bios, which are executed by a separate task (work * queue kthread), inode references (inode->i_count) were not taken -- cgit v0.10.2 From ad9ee2053f3f2babebc09ebc4970daa66c56c7ee Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 17 Jun 2015 16:59:57 +0800 Subject: Btrfs: fix hang when failing to submit bio of directIO The hang is uncoverd by generic/019. btrfs_endio_direct_write() skips the "finish_ordered_fn" part when it hits an error, thus those added ordered extents will never get processed, which block processes that waiting for them via btrfs_start_ordered_extent(). This fixes the above, and meanwhile finish_ordered_fn will do the space accounting work. Signed-off-by: Liu Bo Reviewed-by: Filipe Manana Tested-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 53afda0..0b9fb81 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7872,8 +7872,6 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) struct bio *dio_bio; int ret; - if (err) - goto out_done; again: ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, &ordered_offset, @@ -7896,7 +7894,6 @@ out_test: ordered = NULL; goto again; } -out_done: dio_bio = dip->dio_bio; kfree(dip); -- cgit v0.10.2 From ddba1bfc2369cd0566bcfdab47599834a32d1c19 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 17 Jun 2015 16:59:58 +0800 Subject: Btrfs: fix warning of bytes_may_use While running generic/019, dmesg got several warnings from btrfs_free_reserved_data_space(). Test generic/019 produces some disk failures so sumbit dio will get errors, in which case, btrfs_direct_IO() goes to the error handling and free bytes_may_use, but the problem is that bytes_may_use has been free'd during get_block(). This adds a runtime flag to show if we've gone through get_block(), if so, don't do the cleanup work. Signed-off-by: Liu Bo Reviewed-by: Filipe Manana Tested-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 0ef5cc1..81220b2 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -44,6 +44,8 @@ #define BTRFS_INODE_IN_DELALLOC_LIST 9 #define BTRFS_INODE_READDIO_NEED_LOCK 10 #define BTRFS_INODE_HAS_PROPS 11 +/* DIO is ready to submit */ +#define BTRFS_INODE_DIO_READY 12 /* * The following 3 bits are meant only for the btree inode. 
* When any of them is set, it means an error happened while writing an diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0b9fb81..b33c0cf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7547,6 +7547,7 @@ unlock: current->journal_info = outstanding_extents; btrfs_free_reserved_data_space(inode, len); + set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags); } /* @@ -8357,9 +8358,18 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, btrfs_submit_direct, flags); if (iov_iter_rw(iter) == WRITE) { current->journal_info = NULL; - if (ret < 0 && ret != -EIOCBQUEUED) - btrfs_delalloc_release_space(inode, count); - else if (ret >= 0 && (size_t)ret < count) + if (ret < 0 && ret != -EIOCBQUEUED) { + /* + * If the error comes from submitting stage, + * btrfs_get_blocsk_direct() has free'd data space, + * and metadata space will be handled by + * finish_ordered_fn, don't do that again to make + * sure bytes_may_use is correct. + */ + if (!test_and_clear_bit(BTRFS_INODE_DIO_READY, + &BTRFS_I(inode)->runtime_flags)) + btrfs_delalloc_release_space(inode, count); + } else if (ret >= 0 && (size_t)ret < count) btrfs_delalloc_release_space(inode, count - (size_t)ret); } -- cgit v0.10.2 From 9689457b5b0a2b69874c421a489d3fb50ca76b7b Mon Sep 17 00:00:00 2001 From: Shilong Wang Date: Sun, 12 Apr 2015 14:35:20 +0800 Subject: Btrfs: fix wrong check for btrfs_force_chunk_alloc() btrfs_force_chunk_alloc() return 1 for allocation chunk successfully. This problem exists since commit c87f08ca4. With this patch, we might fix some enospc problems for balances. Signed-off-by: Wang Shilong Reviewed-by: Filipe Manana Tested-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 827951f..88cbb59 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4049,7 +4049,7 @@ restart: if (trans && progress && err == -ENOSPC) { ret = btrfs_force_chunk_alloc(trans, rc->extent_root, rc->block_group->flags); - if (ret == 0) { + if (ret == 1) { err = 0; progress = 0; goto restart; -- cgit v0.10.2 From 0f0ff9a9f3fa2ec6f427603fd521d5f3a0b076d1 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 1 Jul 2015 23:37:46 -0400 Subject: ext4: fix fencepost error in lazytime optimization Commit 8f4d8558391: "ext4: fix lazytime optimization" was not a complete fix. In the case where the inode number is a multiple of 16, and we could still end up updating an inode with dirty timestamps written to the wrong inode on disk. Oops. This can be easily reproduced by using generic/005 with a file system with metadata_csum and lazytime enabled. Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e057c6f..4ad73d3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4348,7 +4348,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb, int inode_size = EXT4_INODE_SIZE(sb); oi.orig_ino = orig_ino; - ino = (orig_ino & ~(inodes_per_block - 1)) + 1; + /* + * Calculate the first inode in the inode table block. Inode + * numbers are one-based. That is, the first inode in a block + * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 
+ */ + ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; -- cgit v0.10.2 From bd7ade3cd9b0850264306f5c2b79024a417b6396 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 2 Jul 2015 01:32:44 -0400 Subject: bufferhead: Add _gfp version for sb_getblk() sb_getblk() is used during ext4 (and possibly other FSes) writeback paths. Sometimes such path require allocating memory and guaranteeing that such allocation won't block. Currently, however, there is no way to provide user flags for sb_getblk which could lead to deadlocks. This patch implements a sb_getblk_gfp with the only difference it can accept user-provided GFP flags. Signed-off-by: Nikolay Borisov Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 73b4522..e6797de 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block) return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); } + +static inline struct buffer_head * +sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) +{ + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); +} + static inline struct buffer_head * sb_find_get_block(struct super_block *sb, sector_t block) { -- cgit v0.10.2 From c45653c341f5c8a0ce19c8f0ad4678640849cb86 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 2 Jul 2015 01:34:07 -0400 Subject: ext4: avoid deadlocks in the writeback path by using sb_getblk_gfp Switch ext4 to using sb_getblk_gfp with GFP_NOFS added to fix possible deadlocks in the page writeback path. Signed-off-by: Nikolay Borisov Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index d86d262..69af30e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -503,7 +503,7 @@ __read_extent_tree_block(const char *function, unsigned int line, struct buffer_head *bh; int err; - bh = sb_getblk(inode->i_sb, pblk); + bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); @@ -1088,7 +1088,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, err = -EIO; goto cleanup; } - bh = sb_getblk(inode->i_sb, newblock); + bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) { err = -ENOMEM; goto cleanup; @@ -1282,7 +1282,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, if (newblock == 0) return err; - bh = sb_getblk(inode->i_sb, newblock); + bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) return -ENOMEM; lock_buffer(bh); -- cgit v0.10.2 From c423bc85097327fba71f9a831280d09b64bb62b6 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 16 Jun 2015 14:54:51 +0300 Subject: drm/omap: check that plane is inside crtc DRM allows planes to be partially off-screen, but DSS hardware does not. This patch adds the necessary check to reject plane configs if the plane is not fully inside the crtc. Signed-off-by: Tomi Valkeinen Acked-by: Laurent Pinchart diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index cfa8276..0989046 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -17,6 +17,7 @@ * this program. If not, see . 
*/ +#include #include #include @@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane, dispc_ovl_enable(omap_plane->id, false); } +static int omap_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + + if (!state->crtc) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (state->crtc_x < 0 || state->crtc_y < 0) + return -EINVAL; + + if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + return -EINVAL; + + if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay) + return -EINVAL; + + return 0; +} + static const struct drm_plane_helper_funcs omap_plane_helper_funcs = { .prepare_fb = omap_plane_prepare_fb, .cleanup_fb = omap_plane_cleanup_fb, + .atomic_check = omap_plane_atomic_check, .atomic_update = omap_plane_atomic_update, .atomic_disable = omap_plane_atomic_disable, }; -- cgit v0.10.2 From 96cbd142310cc7281f94739b27d8e4c308d588c5 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 28 Apr 2015 14:01:32 +0300 Subject: drm/omap: increase DMM transaction timeout The DMM driver uses a timeout of 1 ms to wait for DMM transaction to finish. While DMM should always finish the operation within that time, the timeout is rather strict. Small misbehavior of the system (e.g. an irq taking too long) could trigger the timeout. As the DMM is a critical piece of code for display memory management, let's increase the timeout to 100 ms so that we are less likely to fail a memory allocation in case of system misbehaviors. 100 ms is just a guess of a reasonably large timeout. The HW should accomplish the task in less than 1 ms. Signed-off-by: Tomi Valkeinen diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f2daad8..7841970 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) if (wait) { if (!wait_for_completion_timeout(&engine->compl, - msecs_to_jiffies(1))) { + msecs_to_jiffies(100))) { dev_err(dmm->dev, "timed out waiting for done\n"); ret = -ETIMEDOUT; } -- cgit v0.10.2 From 9c368506c96793f17934a61a0f06412afb7d2f8d Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 28 Apr 2015 14:01:35 +0300 Subject: drm/omap: fix omap_framebuffer_unpin() error handling omap_framebuffer_unpin() check the return value of omap_gem_put_paddr() and return immediately if omap_gem_put_paddr() fails. This patch removes the check for the return value, and also removes the return value of omap_framebuffer_unpin(), because: * Nothing checks the return value of omap_framebuffer_unpin(), and even something did check it, there's nothing the caller can do to handle the error. * If a omap_gem_put_paddr() fails, the framebuffer's other planes will be left unreleased. So it's better to call omap_gem_put_paddr() for all the planes, even if one would fail. 
Signed-off-by: Tomi Valkeinen Acked-by: Laurent Pinchart diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index ae2df41..2ef89c0 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); int omap_framebuffer_pin(struct drm_framebuffer *fb); -int omap_framebuffer_unpin(struct drm_framebuffer *fb); +void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, struct omap_drm_window *win, struct omap_overlay_info *info); struct drm_connector *omap_framebuffer_get_next_connector( diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 0b967e7..51b1219 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -287,10 +287,10 @@ fail: } /* unpin, no longer being scanned out: */ -int omap_framebuffer_unpin(struct drm_framebuffer *fb) +void omap_framebuffer_unpin(struct drm_framebuffer *fb) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - int ret, i, n = drm_format_num_planes(fb->pixel_format); + int i, n = drm_format_num_planes(fb->pixel_format); mutex_lock(&omap_fb->lock); @@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb) if (omap_fb->pin_count > 0) { mutex_unlock(&omap_fb->lock); - return 0; + return; } for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; - ret = omap_gem_put_paddr(plane->bo); - if (ret) - goto fail; + omap_gem_put_paddr(plane->bo); plane->paddr = 0; } mutex_unlock(&omap_fb->lock); - - return 0; - -fail: - mutex_unlock(&omap_fb->lock); - return ret; } struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) -- cgit v0.10.2 From 393a949f51994a67e8e33b2f79c6f2020959a7e2 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 28 Apr 2015 14:01:36 +0300 Subject: drm/omap: fix omap_gem_put_paddr() error handling If tiler_unpin() call in omap_gem_put_paddr() fails, omap_gem_put_paddr() will immediately stop processing and return an error. This patch remoes that error checking, and also removes omap_gem_put_paddr()'s return value, because: * The caller of omap_gem_put_paddr() can do nothing if an error happens, so it's pointless to return an error value * If tiler_unpin() fails, the GEM object will possibly be left in an undefined state, where the DMM mapping may have been removed, but the GEM object still thinks everything is as it should be, leading to crashes later. * There's no point in returning an error from a "free" call, as the caller can do nothing about it. So it's better to clean up as much as possible. 
Signed-off-by: Tomi Valkeinen Acked-by: Laurent Pinchart diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 2ef89c0..1ae8477 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj, enum dma_data_direction dir); int omap_gem_get_paddr(struct drm_gem_object *obj, dma_addr_t *paddr, bool remap); -int omap_gem_put_paddr(struct drm_gem_object *obj); +void omap_gem_put_paddr(struct drm_gem_object *obj); int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, bool remap); int omap_gem_put_pages(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 874eb6d..7ed08fdc 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -808,10 +808,10 @@ fail: /* Release physical address, when DMA is no longer being performed.. this * could potentially unpin and unmap buffers from TILER */ -int omap_gem_put_paddr(struct drm_gem_object *obj) +void omap_gem_put_paddr(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret = 0; + int ret; mutex_lock(&obj->dev->struct_mutex); if (omap_obj->paddr_cnt > 0) { @@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj) if (ret) { dev_err(obj->dev->dev, "could not unpin pages: %d\n", ret); - goto fail; } ret = tiler_release(omap_obj->block); if (ret) { @@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj) omap_obj->block = NULL; } } -fail: + mutex_unlock(&obj->dev->struct_mutex); - return ret; } /* Get rotated scanout address (only valid if already pinned), at the -- cgit v0.10.2 From d642d3acd89454006fd6ca9cc4dd57f26959f9d1 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 8 May 2015 13:32:31 +0300 Subject: drm/omap: fix align_pitch() for 24 bits per pixel align_pitch() uses ALIGN() to ensure the pitch is aligned to SGX's requirement of 8 pixels. However, ALIGN() expects the alignment value to be a power of two, which is not the case for 24 bits per pixels. Use roundup() instead, which works for all alignments. This fixes the error seen with 24 bits per pixel modes: "buffer pitch (2176 bytes) is not a multiple of pixel size (3 bytes)" Signed-off-by: Tomi Valkeinen diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 1ae8477..12081e6 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp) /* PVR needs alignment to 8 pixels.. right now that is the most * restrictive stride requirement.. 
*/ - return ALIGN(pitch, 8 * bytespp); + return roundup(pitch, 8 * bytespp); } /* map crtc to vblank mask */ -- cgit v0.10.2 From 743c16719f671c206923d23dae4ac57edfd9483c Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 1 Jul 2014 20:52:50 +0200 Subject: drm/omap: replace ALIGN(PAGE_SIZE) by PAGE_ALIGN use mm.h definition Cc: David Airlie Cc: Tomi Valkeinen Cc: dri-devel@lists.freedesktop.org Signed-off-by: Fabian Frederick Signed-off-by: Tomi Valkeinen diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 23b5a84..720d16b 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; if (fbdev->ywrap_enabled) { /* need to align pitch to page size if using DMM scrolling */ - mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE); + mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]); } /* allocate backing bo */ -- cgit v0.10.2 From f307170d6e591a48529425b1ed6ca835790995a9 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 19 Jun 2015 17:23:37 -0500 Subject: netfilter: nf_queue: Don't recompute the hook_list head If someone sends packets from one of the netdevice ingress hooks to the a userspace queue, and then userspace later accepts the packet, the netfilter code can enter an infinite loop as the list head will never be found. Pass in the saved list_head to avoid this. Signed-off-by: "Eric W. Biederman" Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index cd60d39..8a8b2ab 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) if (verdict == NF_ACCEPT) { next_hook: - verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], + verdict = nf_iterate(entry->state.hook_list, skb, &entry->state, &elem); } -- cgit v0.10.2 From a1bc1b356a9d21bf29bc7c873718b5cacdf119b4 Mon Sep 17 00:00:00 2001 From: Bernhard Thaler Date: Sat, 20 Jun 2015 00:17:50 +0200 Subject: netfilter: bridge: fix CONFIG_NF_DEFRAG_IPV4/6 related warnings/errors br_nf_ip_fragment() is not needed when neither CONFIG_NF_DEFRAG_IPV4 nor CONFIG_NF_DEFRAG_IPV6 is set. struct brnf_frag_data must be available if either CONFIG_NF_DEFRAG_IPV4 or CONFIG_NF_DEFRAG_IPV6 is set. 
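For illustration, a stand-alone sketch of that guard layout with made-up feature names (the kernel code uses IS_ENABLED(), which also covers =m; plain defined() stands in for it here): the shared structure is compiled when either option is on, while the helper needed by only one path keeps its own narrower guard.

    /* Stand-alone sketch of conditional-compilation guards; symbols are illustrative. */
    #include <stdio.h>

    #define CONFIG_DEFRAG_A 1          /* pretend feature A was selected */
    /* #define CONFIG_DEFRAG_B 1 */

    #if defined(CONFIG_DEFRAG_A) || defined(CONFIG_DEFRAG_B)
    struct frag_data {                 /* shared by both code paths */
            char mac[32];
            unsigned char encap_size;
    };
    #endif

    #if defined(CONFIG_DEFRAG_A)
    static int fragment_a(const struct frag_data *data)   /* feature-A-only helper */
    {
            return data->encap_size;
    }
    #endif

    int main(void)
    {
    #if defined(CONFIG_DEFRAG_A)
            struct frag_data d = { { 0 }, 14 };

            printf("fragment_a -> %d\n", fragment_a(&d));
    #endif
            return 0;
    }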
Fixes: efb6de9b4ba0 ("netfilter: bridge: forward IPv6 fragmented packets") Reported-by: kbuild test robot Signed-off-by: Bernhard Thaler Signed-off-by: Pablo Neira Ayuso diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index d89f4fa..8a394bd 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb) /* largest possible L2 header, see br_nf_dev_queue_xmit() */ #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) -#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct brnf_frag_data { char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; u8 encap_size; @@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb) } #endif +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb, int (*output)(struct sock *, struct sk_buff *)) { @@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb, return ip_do_fragment(sk, skb, output); } +#endif static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { -- cgit v0.10.2 From 3bd229976f64bea64c60803f9fc8d9f0059ba2f2 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 30 Jun 2015 22:21:00 +0200 Subject: netfilter: arptables: use percpu jumpstack commit 482cfc318559 ("netfilter: xtables: avoid percpu ruleset duplication") Unlike ip and ip6tables, arp tables were never converted to use the percpu jump stack. It still uses the rule blob to store return address, which isn't safe anymore since we now share this blob among all processors. Because there is no TEE support for arptables, we don't need to cope with reentrancy, so we can use loocal variable to hold stack offset. Fixes: 482cfc318559 ("netfilter: xtables: avoid percpu ruleset duplication") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 95c9b6e..92305a1 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); unsigned int verdict = NF_DROP; const struct arphdr *arp; - struct arpt_entry *e, *back; + struct arpt_entry *e, **jumpstack; const char *indev, *outdev; const void *table_base; + unsigned int cpu, stackidx = 0; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; @@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb, local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; + cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. 
*/ smp_read_barrier_depends(); table_base = private->entries; + jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; e = get_entry(table_base, private->hook_entry[hook]); - back = get_entry(table_base, private->underflow[hook]); acpar.in = state->in; acpar.out = state->out; @@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb, verdict = (unsigned int)(-v) - 1; break; } - e = back; - back = get_entry(table_base, back->comefrom); + if (stackidx == 0) { + e = get_entry(table_base, + private->underflow[hook]); + } else { + e = jumpstack[--stackidx]; + e = arpt_next_entry(e); + } continue; } if (table_base + v != arpt_next_entry(e)) { - /* Save old back ptr in next entry */ - struct arpt_entry *next = arpt_next_entry(e); - next->comefrom = (void *)back - table_base; - /* set back pointer to next entry */ - back = next; + if (stackidx >= private->stacksize) { + verdict = NF_DROP; + break; + } + jumpstack[stackidx++] = e; } e = get_entry(table_base, v); -- cgit v0.10.2 From dd302b59bde0149c20df7278c0d36c765e66afbd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 30 Jun 2015 22:27:51 +0200 Subject: netfilter: bridge: don't leak skb in error paths br_nf_dev_queue_xmit must free skb in its error path. NF_DROP is misleading -- its an okfn, not a netfilter hook. Fixes: 462fb2af9788a ("bridge : Sanitize skb before it enters the IP stack") Fixes: efb6de9b4ba00 ("netfilter: bridge: forward IPv6 fragmented packets") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 8a394bd..c8b9bcf 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -744,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) struct brnf_frag_data *data; if (br_validate_ipv4(skb)) - return NF_DROP; + goto drop; IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; @@ -769,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) struct brnf_frag_data *data; if (br_validate_ipv6(skb)) - return NF_DROP; + goto drop; IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; @@ -784,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) if (v6ops) return v6ops->fragment(sk, skb, br_nf_push_frag_xmit); - else - return -EMSGSIZE; + + kfree_skb(skb); + return -EMSGSIZE; } #endif nf_bridge_info_free(skb); return br_dev_queue_push_xmit(sk, skb); + drop: + kfree_skb(skb); + return 0; } /* PF_BRIDGE/POST_ROUTING ********************************************/ -- cgit v0.10.2 From 6742b9e310bcf511b876532846e5302b07b7fedc Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 1 Jul 2015 16:14:25 +0200 Subject: netfilter: nfnetlink: keep going batch handling on missing modules After a fresh boot with no modules in place at all and a large rulesets, the existing nfnetlink_rcv_batch() funcion can take long time to commit the ruleset due to the many abort path. This is specifically a problem for the existing client of this code, ie. nf_tables, since it results in several synchronize_rcu() call in a row. This patch changes the policy to keep full batch processing on missing modules errors so we abort only once. 
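The shape of that change, as a stand-alone sketch (not the nfnetlink code): per-message failures are collected in a status bitmask and the commit/abort/replay decision is taken once after the loop, instead of aborting and replaying the whole batch on every missing module.

    /* Stand-alone sketch of the "record status, decide once" batch pattern. */
    #include <stdio.h>

    enum {
            BATCH_FAILURE = 1 << 0,
            BATCH_DONE    = 1 << 1,
            BATCH_REPLAY  = 1 << 2,
    };

    /* Pretend messages 1 and 3 need a module that is not loaded yet. */
    static int process_msg(int i)
    {
            return (i == 1 || i == 3) ? -1 : 0;
    }

    int main(void)
    {
            unsigned int status = 0;
            int i;

            for (i = 0; i < 5; i++) {
                    if (process_msg(i))
                            status |= BATCH_REPLAY;   /* keep going, remember to replay */
            }
            status |= BATCH_DONE;

            if (status & BATCH_REPLAY)
                    printf("abort once, load the missing modules, replay the batch\n");
            else if (status == BATCH_DONE)
                    printf("commit the batch\n");
            else
                    printf("abort the batch\n");      /* a BATCH_FAILURE was recorded */
            return 0;
    }

With a large ruleset this turns many abort-and-replay rounds, each paying the synchronize_rcu() cost mentioned above, into at most one.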
Reported-by: Eric Leblond Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 8b117c9..0c0e8ec 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb) } } +enum { + NFNL_BATCH_FAILURE = (1 << 0), + NFNL_BATCH_DONE = (1 << 1), + NFNL_BATCH_REPLAY = (1 << 2), +}; + static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, u_int16_t subsys_id) { @@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); const struct nfnetlink_subsystem *ss; const struct nfnl_callback *nc; - bool success = true, done = false; static LIST_HEAD(err_list); + u32 status; int err; if (subsys_id >= NFNL_SUBSYS_COUNT) return netlink_ack(skb, nlh, -EINVAL); replay: + status = 0; + skb = netlink_skb_clone(oskb, GFP_KERNEL); if (!skb) return netlink_ack(oskb, nlh, -ENOMEM); @@ -336,10 +344,10 @@ replay: if (type == NFNL_MSG_BATCH_BEGIN) { /* Malformed: Batch begin twice */ nfnl_err_reset(&err_list); - success = false; + status |= NFNL_BATCH_FAILURE; goto done; } else if (type == NFNL_MSG_BATCH_END) { - done = true; + status |= NFNL_BATCH_DONE; goto done; } else if (type < NLMSG_MIN_TYPE) { err = -EINVAL; @@ -382,11 +390,8 @@ replay: * original skb. */ if (err == -EAGAIN) { - nfnl_err_reset(&err_list); - ss->abort(oskb); - nfnl_unlock(subsys_id); - kfree_skb(skb); - goto replay; + status |= NFNL_BATCH_REPLAY; + goto next; } } ack: @@ -402,7 +407,7 @@ ack: */ nfnl_err_reset(&err_list); netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); - success = false; + status |= NFNL_BATCH_FAILURE; goto done; } /* We don't stop processing the batch on errors, thus, @@ -410,19 +415,26 @@ ack: * triggers. */ if (err) - success = false; + status |= NFNL_BATCH_FAILURE; } - +next: msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) msglen = skb->len; skb_pull(skb, msglen); } done: - if (success && done) + if (status & NFNL_BATCH_REPLAY) { + ss->abort(oskb); + nfnl_err_reset(&err_list); + nfnl_unlock(subsys_id); + kfree_skb(skb); + goto replay; + } else if (status == NFNL_BATCH_DONE) { ss->commit(oskb); - else + } else { ss->abort(oskb); + } nfnl_err_deliver(&err_list, oskb); nfnl_unlock(subsys_id); -- cgit v0.10.2 From 45a481c2176f6acca2efb6477c6018b2c3e3c60f Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Mon, 29 Jun 2015 14:30:09 -0700 Subject: clk: iproc: fix memory leak from clock name of_property_read_string_index takes array of pointers and assign them to strings read from device tree property. No additional memory allocation is needed prior to calling of_property_read_string_index. 
In fact, since the array of pointers will be re-assigned to other strings, any memory that it points to prior to calling of_property_read_string_index will be leaked Reported-by: Dan Carpenter Signed-off-by: Ray Jui Fixes: 5fe225c105fd ("clk: iproc: add initial common clock support") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c index e19c09c..f630e1b 100644 --- a/drivers/clk/bcm/clk-iproc-asiu.c +++ b/drivers/clk/bcm/clk-iproc-asiu.c @@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node, struct iproc_asiu_clk *asiu_clk; const char *clk_name; - clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL); - if (WARN_ON(!clk_name)) - goto err_clk_register; - ret = of_property_read_string_index(node, "clock-output-names", i, &clk_name); if (WARN_ON(ret)) @@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node, err_clk_register: for (i = 0; i < num_clks; i++) - kfree(asiu->clks[i].name); + clk_unregister(asiu->clk_data.clks[i]); iounmap(asiu->gate_base); err_iomap_gate: diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index 46fb84b..a8d971b 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -655,10 +655,6 @@ void __init iproc_pll_clk_setup(struct device_node *node, memset(&init, 0, sizeof(init)); parent_name = node->name; - clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL); - if (WARN_ON(!clk_name)) - goto err_clk_register; - ret = of_property_read_string_index(node, "clock-output-names", i, &clk_name); if (WARN_ON(ret)) @@ -690,10 +686,8 @@ void __init iproc_pll_clk_setup(struct device_node *node, return; err_clk_register: - for (i = 0; i < num_clks; i++) { - kfree(pll->clks[i].name); + for (i = 0; i < num_clks; i++) clk_unregister(pll->clk_data.clks[i]); - } err_pll_register: if (pll->asiu_base) -- cgit v0.10.2 From 69916d96094e1e16567c5f25515a13ed2896c730 Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Mon, 29 Jun 2015 14:30:10 -0700 Subject: clk: iproc: fix bit manipulation arithmetic A 32-bit variable should be type casted to 64-bit before arithmetic operation and assigning it to a 64-bit variable Reported-by: Dan Carpenter Signed-off-by: Ray Jui Fixes: 5fe225c105fd ("clk: iproc: add initial common clock support") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index a8d971b..2dda4e8 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw, val = readl(pll->pll_base + ctrl->ndiv_int.offset); ndiv_int = (val >> ctrl->ndiv_int.shift) & bit_mask(ctrl->ndiv_int.width); - ndiv = ndiv_int << ctrl->ndiv_int.shift; + ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift; if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { val = readl(pll->pll_base + ctrl->ndiv_frac.offset); @@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw, bit_mask(ctrl->ndiv_frac.width); if (ndiv_frac != 0) - ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac; + ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) | + ndiv_frac; } val = readl(pll->pll_base + ctrl->pdiv.offset); -- cgit v0.10.2 From 15ab38273d21a45487116ad4c428593427954848 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Sun, 28 Jun 2015 10:55:32 +0100 Subject: clk: stm32: Fix out-by-one error path in the index lookup If stm32f4_rcc_lookup() is called with primary == 0 and secondary == 192 then it will read beyond the end of 
the table array due to an out-by-one error in the range check. In addition to the fixing the inequality we also modify the r.h.s. to make it even more explicit that we are comparing against the size of table in bits. Reported-by: Dan Carpenter Signed-off-by: Daniel Thompson Acked-by: Maxime Coquelin Fixes: 358bdf892f6b ("clk: stm32: Add clock driver for STM32F4[23]xxx devices") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index b9b12a7..3f6f7ad 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -268,7 +268,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary) memcpy(table, stm32f42xx_gate_map, sizeof(table)); /* only bits set in table can be used as indices */ - if (WARN_ON(secondary > 8 * sizeof(table) || + if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) || 0 == (table[BIT_ULL_WORD(secondary)] & BIT_ULL_MASK(secondary)))) return -EINVAL; -- cgit v0.10.2 From c76a024e82bdb83a0f7d57e006f8e7f8ddf983e5 Mon Sep 17 00:00:00 2001 From: David Dueck Date: Fri, 26 Jun 2015 15:30:22 +0200 Subject: clk: at91: do not leak resources Do not leak memory and free irqs in case of an error. Acked-by: Boris Brezillon Signed-off-by: David Dueck Signed-off-by: Stephen Boyd diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c index 152dcb3..61566bc 100644 --- a/drivers/clk/at91/clk-h32mx.c +++ b/drivers/clk/at91/clk-h32mx.c @@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, h32mxclk->pmc = pmc; clk = clk_register(NULL, &h32mxclk->hw); - if (!clk) + if (!clk) { + kfree(h32mxclk); return; + } of_clk_add_provider(np, of_clk_src_simple_get, clk); } diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index c240045..27dfa96 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc, irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); ret = request_irq(osc->irq, clk_main_osc_irq_handler, IRQF_TRIGGER_HIGH, name, osc); - if (ret) + if (ret) { + kfree(osc); return ERR_PTR(ret); + } if (bypass) pmc_write(pmc, AT91_CKGR_MOR, diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index f98eafe..5b3ded5 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq, irq_set_status_flags(master->irq, IRQ_NOAUTOEN); ret = request_irq(master->irq, clk_master_irq_handler, IRQF_TRIGGER_HIGH, "clk-master", master); - if (ret) + if (ret) { + kfree(master); return ERR_PTR(ret); + } clk = clk_register(NULL, &master->hw); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + free_irq(master->irq, master); kfree(master); + } return clk; } diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 6ec79db..23163be 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -338,12 +338,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name, irq_set_status_flags(pll->irq, IRQ_NOAUTOEN); ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH, id ? 
"clk-pllb" : "clk-plla", pll); - if (ret) + if (ret) { + kfree(pll); return ERR_PTR(ret); + } clk = clk_register(NULL, &pll->hw); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + free_irq(pll->irq, pll); kfree(pll); + } return clk; } diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c index a76d03f..58008b3 100644 --- a/drivers/clk/at91/clk-system.c +++ b/drivers/clk/at91/clk-system.c @@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name, irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); ret = request_irq(sys->irq, clk_system_irq_handler, IRQF_TRIGGER_HIGH, name, sys); - if (ret) + if (ret) { + kfree(sys); return ERR_PTR(ret); + } } clk = clk_register(NULL, &sys->hw); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + free_irq(sys->irq, sys); kfree(sys); + } return clk; } diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c index ae3263b..30dd697 100644 --- a/drivers/clk/at91/clk-utmi.c +++ b/drivers/clk/at91/clk-utmi.c @@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq, irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN); ret = request_irq(utmi->irq, clk_utmi_irq_handler, IRQF_TRIGGER_HIGH, "clk-utmi", utmi); - if (ret) + if (ret) { + kfree(utmi); return ERR_PTR(ret); + } clk = clk_register(NULL, &utmi->hw); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + free_irq(utmi->irq, utmi); kfree(utmi); + } return clk; } -- cgit v0.10.2 From 25c14ef86a0d5ec9320e833685c15bc96f504864 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan <_govind@gmx.com> Date: Wed, 1 Jul 2015 14:21:57 +0530 Subject: enic: fix issues in enic_poll In enic_poll, we clean tx and rx queues, when low latency busy socket polling is happening, enic_poll will only clean tx queue. After cleaning tx, it should return total budget for re-poll. There is a small window between vnic_intr_unmask() and enic_poll_unlock_napi(). In this window if an irq occurs and napi is scheduled on different cpu, it tries to acquire enic_poll_lock_napi() and fails. Unlock napi_poll before unmasking the interrupt. v2: Do not change tx wonk done behaviour. Consider only rx work done for completing napi. Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index da2004e..918a8e4 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done, 0 /* dont unmask intr */, 0 /* dont reset intr timer */); - return rq_work_done; + return budget; } if (budget > 0) @@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. 
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget) napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); return rq_work_done; } -- cgit v0.10.2 From 462e1ead9296a8452499fb10cf3b51903ffe24ac Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 2 Jul 2015 05:48:17 -0700 Subject: bridge: vlan: fix usage of vlan 0 and 4095 again Vlan ids 0 and 4095 were disallowed by commit: 8adff41c3d25 ("bridge: Don't use VID 0 and 4095 in vlan filtering") but then the check was removed when vlan ranges were introduced by: bdced7ef7838 ("bridge: support for multiple vlans and vlan ranges in setlink and dellink requests") So reintroduce the vlan range check. Before patch: [root@testvm ~]# bridge vlan add vid 0 dev eth0 master (succeeds) After Patch: [root@testvm ~]# bridge vlan add vid 0 dev eth0 master RTNETLINK answers: Invalid argument Signed-off-by: Nikolay Aleksandrov Fixes: bdced7ef7838 ("bridge: support for multiple vlans and vlan ranges in setlink and dellink requests") Acked-by: Toshiaki Makita Signed-off-by: David S. Miller diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 6b67ed3..364bdc9 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -457,6 +457,8 @@ static int br_afspec(struct net_bridge *br, if (nla_len(attr) != sizeof(struct bridge_vlan_info)) return -EINVAL; vinfo = nla_data(attr); + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) + return -EINVAL; if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { if (vinfo_start) return -EINVAL; -- cgit v0.10.2 From 13ee9fdba96577eb1583dcd7b15767ef623fae12 Mon Sep 17 00:00:00 2001 From: Szabolcs Nagy Date: Wed, 1 Jul 2015 23:08:10 +0100 Subject: ARM: 8397/1: fix vdsomunge not to depend on glibc specific error.h If the host toolchain is not glibc based then the arm kernel build fails with arch/arm/vdso/vdsomunge.c:53:19: fatal error: error.h: No such file or directory error.h is a glibc only header (ie not available in musl, newlib and bsd libcs). Changed the error reporting to standard conforming code to avoid depending on specific C implementations. Signed-off-by: Szabolcs Nagy Acked-by: Will Deacon Fixes: 8512287a8165 ("ARM: 8330/1: add VDSO user-space code") Cc: stable@vger.kernel.org Signed-off-by: Nathan Lynch Signed-off-by: Russell King diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c index 9005b07..aedec81 100644 --- a/arch/arm/vdso/vdsomunge.c +++ b/arch/arm/vdso/vdsomunge.c @@ -45,13 +45,11 @@ * it does. */ -#define _GNU_SOURCE - #include #include #include -#include #include +#include #include #include #include @@ -82,11 +80,25 @@ #define EF_ARM_ABI_FLOAT_HARD 0x400 #endif +static int failed; +static const char *argv0; static const char *outfile; +static void fail(const char *fmt, ...) 
+{ + va_list ap; + + failed = 1; + fprintf(stderr, "%s: ", argv0); + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(EXIT_FAILURE); +} + static void cleanup(void) { - if (error_message_count > 0 && outfile != NULL) + if (failed && outfile != NULL) unlink(outfile); } @@ -119,68 +131,66 @@ int main(int argc, char **argv) int infd; atexit(cleanup); + argv0 = argv[0]; if (argc != 3) - error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]); + fail("Usage: %s [infile] [outfile]\n", argv[0]); infile = argv[1]; outfile = argv[2]; infd = open(infile, O_RDONLY); if (infd < 0) - error(EXIT_FAILURE, errno, "Cannot open %s", infile); + fail("Cannot open %s: %s\n", infile, strerror(errno)); if (fstat(infd, &stat) != 0) - error(EXIT_FAILURE, errno, "Failed stat for %s", infile); + fail("Failed stat for %s: %s\n", infile, strerror(errno)); inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0); if (inbuf == MAP_FAILED) - error(EXIT_FAILURE, errno, "Failed to map %s", infile); + fail("Failed to map %s: %s\n", infile, strerror(errno)); close(infd); inhdr = inbuf; if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0) - error(EXIT_FAILURE, 0, "Not an ELF file"); + fail("Not an ELF file\n"); if (inhdr->e_ident[EI_CLASS] != ELFCLASS32) - error(EXIT_FAILURE, 0, "Unsupported ELF class"); + fail("Unsupported ELF class\n"); swap = inhdr->e_ident[EI_DATA] != HOST_ORDER; if (read_elf_half(inhdr->e_type, swap) != ET_DYN) - error(EXIT_FAILURE, 0, "Not a shared object"); + fail("Not a shared object\n"); - if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) { - error(EXIT_FAILURE, 0, "Unsupported architecture %#x", - inhdr->e_machine); - } + if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) + fail("Unsupported architecture %#x\n", inhdr->e_machine); e_flags = read_elf_word(inhdr->e_flags, swap); if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) { - error(EXIT_FAILURE, 0, "Unsupported EABI version %#x", - EF_ARM_EABI_VERSION(e_flags)); + fail("Unsupported EABI version %#x\n", + EF_ARM_EABI_VERSION(e_flags)); } if (e_flags & EF_ARM_ABI_FLOAT_HARD) - error(EXIT_FAILURE, 0, - "Unexpected hard-float flag set in e_flags"); + fail("Unexpected hard-float flag set in e_flags\n"); clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT); outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); if (outfd < 0) - error(EXIT_FAILURE, errno, "Cannot open %s", outfile); + fail("Cannot open %s: %s\n", outfile, strerror(errno)); if (ftruncate(outfd, stat.st_size) != 0) - error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile); + fail("Cannot truncate %s: %s\n", outfile, strerror(errno)); outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, outfd, 0); if (outbuf == MAP_FAILED) - error(EXIT_FAILURE, errno, "Failed to map %s", outfile); + fail("Failed to map %s: %s\n", outfile, strerror(errno)); close(outfd); @@ -195,7 +205,7 @@ int main(int argc, char **argv) } if (msync(outbuf, stat.st_size, MS_SYNC) != 0) - error(EXIT_FAILURE, errno, "Failed to sync %s", outfile); + fail("Failed to sync %s: %s\n", outfile, strerror(errno)); return EXIT_SUCCESS; } -- cgit v0.10.2 From 11b8b25ce4f8acfd3b438683c0c9ade27756c6e8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 3 Jul 2015 12:42:36 +0100 Subject: ARM: fix lockdep unannotated irqs-off warning Wolfram Sang reported an unannotated irqs-off warning from lockdep: WARNING: CPU: 0 PID: 282 at kernel/locking/lockdep.c:3557 check_flags+0x84/0x1f4() DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled) CPU: 0 PID: 282 Comm: rcS 
Tainted: G W 4.1.0-00002-g5b076054611833 #179 Hardware name: Generic Emma Mobile EV2 (Flattened Device Tree) Backtrace: [] (dump_backtrace) from [] (show_stack+0x18/0x1c) r6:c02dcc67 r5:00000009 r4:00000000 r3:00400000 [] (show_stack) from [] (dump_stack+0x20/0x28) [] (dump_stack) from [] (warn_slowpath_common+0x8c/0xb4) [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x38/0x40) r8:c780f470 r7:00000000 r6:00000000 r5:c03b0570 r4:c0b7ec04 [] (warn_slowpath_fmt) from [] (check_flags+0x84/0x1f4) r3:c02e13d8 r2:c02dceaa [] (check_flags) from [] (lock_acquire+0x4c/0xbc) r5:00000000 r4:60000193 [] (lock_acquire) from [] (_raw_spin_lock+0x34/0x44) r9:000a8d5c r8:00000001 r7:c7806000 r6:c780f460 r5:c03b06a0 r4:c780f460 [] (_raw_spin_lock) from [] (handle_fasteoi_irq+0x20/0x11c) r4:c780f400 [] (handle_fasteoi_irq) from [] (generic_handle_irq+0x28/0x38) r6:00000000 r5:c03b038c r4:00000012 r3:c005a8ac [] (generic_handle_irq) from [] (__handle_domain_irq+0x88/0xa8) r4:00000000 r3:00000026 [] (__handle_domain_irq) from [] (gic_handle_irq+0x40/0x58) r8:10c5347d r7:10c5347d r6:c35b1fb0 r5:c03a6304 r4:c8802000 r3:c35b1fb0 [] (gic_handle_irq) from [] (__irq_usr+0x48/0x60) Exception stack(0xc35b1fb0 to 0xc35b1ff8) 1fa0: 00000061 00000000 000ab736 00000066 1fc0: 00000061 000aa1f0 000a8d54 000a8d54 000a8d88 000a8d5c 000a8cc8 000a8d68 1fe0: 72727272 bef8a528 000398c0 00031334 20000010 ffffffff r6:ffffffff r5:20000010 r4:00031334 r3:00000061 ---[ end trace cb88537fdc8fa202 ]--- possible reason: unannotated irqs-off. irq event stamp: 769 hardirqs last enabled at (769): [] ret_fast_syscall+0x2c/0x54 hardirqs last disabled at (768): [] ret_fast_syscall+0xc/0x54 softirqs last enabled at (0): [] copy_process.part.65+0x2e8/0x11dc softirqs last disabled at (0): [< (null)>] (null) His kernel configuration had: CONFIG_PROVE_LOCKING=y CONFIG_TRACE_IRQFLAGS=y but no IRQSOFF_TRACER, which means entry from userspace can result in the kernel seeing IRQs off without being notified of that change of state. Change the IRQSOFF ifdef in the usr_entry macro to TRACE_IRQFLAGS instead. Tested-by: Wolfram Sang Signed-off-by: Russell King diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 570306c..dba6cf6 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -408,7 +408,7 @@ ENDPROC(__fiq_abt) zero_fp .if \trace -#ifdef CONFIG_IRQSOFF_TRACER +#ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif ct_user_exit save = 0 -- cgit v0.10.2 From ac5e2f170f033e48cfcdc2c4f74b27083eabffa5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Jul 2015 10:02:39 +0100 Subject: ARM: io: document ARM specific behaviour of ioremap*() implementations Add documentation of the ARM specific behaviour of the mappings setup by the ioremap() series of macros. Signed-off-by: Russell King diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 1c3938f..ae10717 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -348,11 +348,47 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from, #endif /* readl */ /* - * ioremap and friends. + * ioremap() and friends. * - * ioremap takes a PCI memory address, as specified in - * Documentation/io-mapping.txt. + * ioremap() takes a resource address, and size. Due to the ARM memory + * types, it is important to use the correct ioremap() function as each + * mapping has specific properties. 
* + * Function Memory type Cacheability Cache hint + * ioremap() Device n/a n/a + * ioremap_nocache() Device n/a n/a + * ioremap_cache() Normal Writeback Read allocate + * ioremap_wc() Normal Non-cacheable n/a + * ioremap_wt() Normal Non-cacheable n/a + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" + * - writes may be delayed before they hit the endpoint device + * + * ioremap_nocache() is the same as ioremap() as there are too many device + * drivers using this for device registers, and documentation which tells + * people to use it for such for this to be any different. This is not a + * safe fallback for memory-like mappings, or memory regions where the + * compiler may generate unaligned accesses - eg, via inlining its own + * memcpy. + * + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * - ordering is not guaranteed without explicit dependencies or barrier + * instructions + * - writes may be delayed before they hit the endpoint memory + * + * The cache hint is only a performance hint: CPUs may alias these hints. + * Eg, a CPU not implementing read allocate but implementing write allocate + * will provide a write allocate mapping instead. */ #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -- cgit v0.10.2 From 1e2c727f6c022778d4562147433ca4c0b101f0ad Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Jul 2015 10:17:55 +0100 Subject: ARM: io: fix ioremap_wt() implementation ioremap_wt() was added by aliasing it to ioremap_nocache(), which is a device mapping. Device mappings do not allow unaligned accesses, but it appears that GCC is able to inline its own memcpy() implementation which may use such accesses. The only user of this is pmem, which uses memcpy() on the region. Therefore, this is unsafe. We must implement ioremap_wt() correctly for ARM, or not at all. This patch adds a more correct implementation by re-using ioremap_wc() to provide a normal-memory non-cacheable mapping. 
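To make the distinction concrete, a hypothetical driver fragment following the semantics documented in the previous patch (the function, its parameters and the register write are all illustrative): registers keep a Device mapping via ioremap(), while a region that ordinary memcpy()-style code will touch -- the pmem case above -- needs the Normal, non-cacheable mapping that ioremap_wc() (and now ioremap_wt()) provides.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static int map_example(phys_addr_t reg_base, phys_addr_t mem_base,
		       size_t size, const void *buf, size_t len)
{
	void __iomem *regs = ioremap(reg_base, size);	/* Device mapping */
	void __iomem *win = ioremap_wc(mem_base, size);	/* Normal, non-cacheable */
	int ret = (regs && win) ? 0 : -ENOMEM;

	if (!ret) {
		writel(0x1, regs);			/* register poke: Device rules apply */
		memcpy((void __force *)win, buf, len);	/* plain memcpy() is only safe here  */
	}

	if (win)
		iounmap(win);
	if (regs)
		iounmap(regs);
	return ret;
}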
Signed-off-by: Russell King diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index ae10717..f1083eb 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -394,7 +394,7 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from, #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) #define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) +#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) #define iounmap __arm_iounmap /* -- cgit v0.10.2 From 20a1080dff2f1be8933baa0d910c41882c7279ee Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Jul 2015 10:06:32 +0100 Subject: ARM: io: convert ioremap*() to functions Convert the ioremap*() preprocessor macros to real functions, moving them out of line. This allows us to kill off __arm_ioremap(), and __arm_iounmap() helpers, and remove __arm_ioremap_pfn_caller() from global view. Signed-off-by: Russell King diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index f1083eb..a5ed237 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) * The _caller variety takes a __builtin_return_address(0) value for * /proc/vmalloc to use - and should only be used in non-inline functions. */ -extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, - size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); - extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); -extern void __arm_iounmap(volatile void __iomem *addr); extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); @@ -390,12 +385,19 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from, * Eg, a CPU not implementing read allocate but implementing write allocate * will provide a write allocate mapping instead. 
*/ -#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define iounmap __arm_iounmap +void __iomem *ioremap(resource_size_t res_cookie, size_t size); +#define ioremap ioremap +#define ioremap_nocache ioremap + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); +#define ioremap_cache ioremap_cache + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc + +void iounmap(volatile void __iomem *iomem_cookie); +#define iounmap iounmap /* * io{read,write}{16,32}be() macros diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index d1e5ad7..0c81056 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, } #endif -void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, +static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; @@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) { return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, - __builtin_return_address(0)); + __builtin_return_address(0)); } EXPORT_SYMBOL(__arm_ioremap_pfn); @@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *) = __arm_ioremap_caller; -void __iomem * -__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) +void __iomem *ioremap(resource_size_t res_cookie, size_t size) +{ + return arch_ioremap_caller(res_cookie, size, MT_DEVICE, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap); + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) +{ + return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) { - return arch_ioremap_caller(phys_addr, size, mtype, - __builtin_return_address(0)); + return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC, + __builtin_return_address(0)); } -EXPORT_SYMBOL(__arm_ioremap); +EXPORT_SYMBOL(ioremap_wc); /* * Remap an arbitrary physical address space into the kernel virtual @@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr) void (*arch_iounmap)(volatile void __iomem *) = __iounmap; -void __arm_iounmap(volatile void __iomem *io_addr) +void iounmap(volatile void __iomem *cookie) { - arch_iounmap(io_addr); + arch_iounmap(cookie); } -EXPORT_SYMBOL(__arm_iounmap); +EXPORT_SYMBOL(iounmap); #ifdef CONFIG_PCI static int pci_ioremap_mem_type = MT_DEVICE; diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index afd7e05..1dd1093 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, } EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, - size_t size, unsigned int mtype, void *caller) +void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, + unsigned int mtype, void *caller) { - return 
__arm_ioremap_pfn(pfn, offset, size, mtype); + return (void __iomem *)phys_addr; } -void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, - unsigned int mtype) +void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); + +void __iomem *ioremap(resource_size_t res_cookie, size_t size) { - return (void __iomem *)phys_addr; + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE, + __builtin_return_address(0)); } -EXPORT_SYMBOL(__arm_ioremap); +EXPORT_SYMBOL(ioremap); -void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) +{ + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); -void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, - unsigned int mtype, void *caller) +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) +{ + return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wc); + +void __iounmap(volatile void __iomem *addr) { - return __arm_ioremap(phys_addr, size, mtype); } +EXPORT_SYMBOL(__iounmap); void (*arch_iounmap)(volatile void __iomem *); -void __arm_iounmap(volatile void __iomem *addr) +void iounmap(volatile void __iomem *addr) { } -EXPORT_SYMBOL(__arm_iounmap); +EXPORT_SYMBOL(iounmap); -- cgit v0.10.2 From 9ab79bb22cc77adcca5ebbadea6caec6a478f283 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Jul 2015 15:23:10 +0100 Subject: ARM: pgtable: document mapping types Signed-off-by: Russell King diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index bfd662e..aeddd28 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -129,7 +129,36 @@ /* * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B + * ARMv6+ without TEX remapping, they are a table index. 
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B + * + * MT type Pre-ARMv6 ARMv6+ type / cacheable status + * UNCACHED Uncached Strongly ordered + * BUFFERABLE Bufferable Normal memory / non-cacheable + * WRITETHROUGH Writethrough Normal memory / write through + * WRITEBACK Writeback Normal memory / write back, read alloc + * MINICACHE Minicache N/A + * WRITEALLOC Writeback Normal memory / write back, write alloc + * DEV_SHARED Uncached Device memory (shared) + * DEV_NONSHARED Uncached Device memory (non-shared) + * DEV_WC Bufferable Normal memory / non-cacheable + * DEV_CACHED Writeback Normal memory / write back, read alloc + * VECTORS Variable Normal memory / variable + * + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" */ #define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ #define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ -- cgit v0.10.2 From 87775312a86bcf213e3b21f6f7c79e2e00d96f7b Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 2 Jul 2015 16:30:24 +0200 Subject: net-ipv6: Delete an unnecessary check before the function call "free_percpu" The free_percpu() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: David S. Miller diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1a1122a..6090969 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -369,10 +369,7 @@ static void ip6_dst_destroy(struct dst_entry *dst) struct inet6_dev *idev; dst_destroy_metrics_generic(dst); - - if (rt->rt6i_pcpu) - free_percpu(rt->rt6i_pcpu); - + free_percpu(rt->rt6i_pcpu); rt6_uncached_list_del(rt); idev = rt->rt6i_idev; -- cgit v0.10.2 From f6e1c91666990d06ba37c76129495c724202144d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 2 Jul 2015 17:58:21 +0200 Subject: net-RDS: Delete an unnecessary check before the function call "module_put" The module_put() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: David S. Miller diff --git a/net/rds/transport.c b/net/rds/transport.c index 8b4a6cd..83498e1 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister); void rds_trans_put(struct rds_transport *trans) { - if (trans && trans->t_owner) + if (trans) module_put(trans->t_owner); } -- cgit v0.10.2 From 92b80eb33c52583b48ed289bd993579b87b52d9a Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 2 Jul 2015 18:38:12 +0200 Subject: netlink: Delete an unnecessary check before the function call "module_put" The module_put() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. 
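For context, the contract the change relies on, shown through a hypothetical caller (the helper name is made up; the behaviour is exactly the one described above):

#include <linux/module.h>

/* module_put() already checks its argument for NULL and returns
 * immediately, so "if (found && nt->module)" can shrink to "if (found)". */
static void put_tap_owner(struct module *owner)
{
	module_put(owner);	/* no-op when owner == NULL */
}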
This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: David S. Miller diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index dea9253..9a0ae71 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt) out: spin_unlock(&netlink_tap_lock); - if (found && nt->module) + if (found) module_put(nt->module); return found ? 0 : -ENODEV; -- cgit v0.10.2 From 4c938d22c88a9ddccc8c55a85e0430e9c62b1ac5 Mon Sep 17 00:00:00 2001 From: Angga Date: Fri, 3 Jul 2015 14:40:52 +1200 Subject: ipv6: Make MLD packets to only be processed locally Before commit daad151263cf ("ipv6: Make ipv6_is_mld() inline and use it from ip6_mc_input().") MLD packets were only processed locally. After the change, a copy of MLD packet goes through ip6_mr_input, causing MRT6MSG_NOCACHE message to be generated to user space. Make MLD packet only processed locally. Fixes: daad151263cf ("ipv6: Make ipv6_is_mld() inline and use it from ip6_mc_input().") Signed-off-by: Hermin Anggawijaya Signed-off-by: David S. Miller diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index f2e464e..57990c9 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb) if (offset < 0) goto out; - if (!ipv6_is_mld(skb, nexthdr, offset)) - goto out; + if (ipv6_is_mld(skb, nexthdr, offset)) + deliver = true; - deliver = true; + goto out; } /* unknown RA - process it normally */ } -- cgit v0.10.2 From fda8b18c515a5e2caf821887ceafb42c35094eaf Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 3 Jul 2015 16:10:51 +0530 Subject: cxgb4: Fix incorrect sequence numbers shown in devlog Part of commit 49aa284fe64c4c1 ("cxgb4: Add support for devlog") change introduced a real bug where the Device Log Sequence Numbers are no longer being converted from firmware Big-Endian to local CPU-Endian format. This patch moves all of the translation into the devlog_show() routine. The only endianness code now in devlog_open() is the small loop to find the earliest (lowest Sequence Number) Device Log entry in the circular buffer. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 484eb8c..a11485f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v) * eventually have to put a format interpreter in here ... */ seq_printf(seq, "%10d %15llu %8s %8s ", - e->seqno, e->timestamp, + be32_to_cpu(e->seqno), + be64_to_cpu(e->timestamp), (e->level < ARRAY_SIZE(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < ARRAY_SIZE(devlog_facility_strings) ? 
devlog_facility_strings[e->facility] : "UNKNOWN")); - seq_printf(seq, e->fmt, e->params[0], e->params[1], - e->params[2], e->params[3], e->params[4], - e->params[5], e->params[6], e->params[7]); + seq_printf(seq, e->fmt, + be32_to_cpu(e->params[0]), + be32_to_cpu(e->params[1]), + be32_to_cpu(e->params[2]), + be32_to_cpu(e->params[3]), + be32_to_cpu(e->params[4]), + be32_to_cpu(e->params[5]), + be32_to_cpu(e->params[6]), + be32_to_cpu(e->params[7])); } return 0; } @@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file) return ret; } - /* Translate log multi-byte integral elements into host native format - * and determine where the first entry in the log is. + /* Find the earliest (lowest Sequence Number) log entry in the + * circular Device Log. */ for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { struct fw_devlog_e *e = &dinfo->log[index]; - int i; __u32 seqno; if (e->timestamp == 0) continue; - e->timestamp = (__force __be64)be64_to_cpu(e->timestamp); seqno = be32_to_cpu(e->seqno); - for (i = 0; i < 8; i++) - e->params[i] = - (__force __be32)be32_to_cpu(e->params[i]); - if (seqno < fseqno) { fseqno = seqno; dinfo->first = index; -- cgit v0.10.2 From 1bd46782d08b01b73df0085b51ea1021b19b44fd Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 3 Jul 2015 15:22:54 +0100 Subject: ARM: avoid unwanted GCC memset()/memcpy() optimisations for IO variants We don't want GCC optimising our memset_io(), memcpy_fromio() or memcpy_toio() variants, so we must not call one of the standard functions. Provide a separate name for our assembly memcpy() and memset() functions, and use that instead, thereby bypassing GCC's ability to optimise these operations. GCCs optimisation may introduce unaligned accesses which are invalid for device mappings. 
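As a hypothetical illustration of the hazard being closed off (the helper and its payload are invented; 'win' is assumed to come from ioremap(), i.e. a Device mapping): with a plain memcpy() underneath, the compiler may expand a small fixed-size copy inline, for example as a 32-bit, a 16-bit and an 8-bit store, with no guarantee those stores are suitably aligned -- fine on Normal memory, "unpredictable" on a Device mapping. Funnelling the copy through the separately named assembly routine takes that option away.

#include <linux/io.h>
#include <linux/types.h>

static void push_command(void __iomem *win)
{
	static const u8 cmd[7] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70 };

	/* With memcpy_toio() now bottoming out in mmiocpy(), this copy stays
	 * in the hand-written assembly instead of being inlined by GCC. */
	memcpy_toio(win, cmd, sizeof(cmd));
}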
Signed-off-by: Russell King diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index a5ed237..4859820 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -316,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t); static inline void memset_io(volatile void __iomem *dst, unsigned c, size_t count) { - memset((void __force *)dst, c, count); + extern void mmioset(void *, unsigned int, size_t); + mmioset((void __force *)dst, c, count); } #define memset_io(dst,c,count) memset_io(dst,c,count) static inline void memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - memcpy(to, (const void __force *)from, count); + extern void mmiocpy(void *, const void *, size_t); + mmiocpy(to, (const void __force *)from, count); } #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) static inline void memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - memcpy((void __force *)to, from, count); + extern void mmiocpy(void *, const void *, size_t); + mmiocpy((void __force *)to, from, count); } #define memcpy_toio(to,from,count) memcpy_toio(to,from,count) diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index a88671c..5e5a51a 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void); extern void fpundefinstr(void); +void mmioset(void *, unsigned int, size_t); +void mmiocpy(void *, const void *, size_t); + /* platform dependent support */ EXPORT_SYMBOL(arm_delay_ops); @@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(__memzero); +EXPORT_SYMBOL(mmioset); +EXPORT_SYMBOL(mmiocpy); + #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 7797e81..64111bd 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -61,8 +61,10 @@ /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ +ENTRY(mmiocpy) ENTRY(memcpy) #include "copy_template.S" ENDPROC(memcpy) +ENDPROC(mmiocpy) diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index a4ee97b..3c65e3b 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -16,6 +16,7 @@ .text .align 5 +ENTRY(mmioset) ENTRY(memset) UNWIND( .fnstart ) ands r3, r0, #3 @ 1 unaligned? @@ -133,3 +134,4 @@ UNWIND( .fnstart ) b 1b UNWIND( .fnend ) ENDPROC(memset) +ENDPROC(mmioset) -- cgit v0.10.2 From 9705acd63b125dee8b15c705216d7186daea4625 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Fri, 3 Jul 2015 21:13:55 -0400 Subject: ext4: fix reservation release on invalidatepage for delalloc fs On delalloc enabled file system on invalidatepage operation in ext4_da_page_release_reservation() we want to clear the delayed buffer and remove the extent covering the delayed buffer from the extent status tree. However currently there is a bug where on the systems with page size > block size we will always remove extents from the start of the page regardless where the actual delayed buffers are positioned in the page. This leads to the errors like this: EXT4-fs warning (device loop0): ext4_da_release_space:1225: ext4_da_release_space: ino 13, to_free 1 with only 0 reserved data blocks This however can cause data loss on writeback time if the file system is in ENOSPC condition because we're releasing reservation for someones else delayed buffer. Fix this by only removing extents that corresponds to the part of the page we want to invalidate. 
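A worked example of the offset arithmetic, using illustrative numbers only (4k pages, so PAGE_CACHE_SHIFT is 12, and 1k blocks, so i_blkbits is 10 -- four buffers per page): invalidating bytes 2048..4095 of page index 10 must release logical blocks 42-43, not blocks 40-41 at the start of the page.

#include <linux/types.h>

static unsigned long first_block_to_release(void)
{
	unsigned long index = 10, page_shift = 12, blkbits = 10;
	unsigned long offset = 2048;	/* start of the invalidated range */

	unsigned long page_first = index << (page_shift - blkbits);	/* 40 */
	unsigned long range_first = page_first + (offset >> blkbits);	/* 42 */

	/* Before the fix the removal always started at page_first (40),
	 * releasing someone else's delayed blocks 40 and 41. */
	return range_first;
}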
This problem is reproducible by the following fio receipt (however I was only able to reproduce it with fio-2.1 or older. [global] bs=8k iodepth=1024 iodepth_batch=60 randrepeat=1 size=1m directory=/mnt/test numjobs=20 [job1] ioengine=sync bs=1k direct=1 rw=randread filename=file1:file2 [job2] ioengine=libaio rw=randwrite direct=1 filename=file1:file2 [job3] bs=1k ioengine=posixaio rw=randwrite direct=1 filename=file1:file2 [job5] bs=1k ioengine=sync rw=randread filename=file1:file2 [job7] ioengine=libaio rw=randwrite filename=file1:file2 [job8] ioengine=posixaio rw=randwrite filename=file1:file2 [job10] ioengine=mmap rw=randwrite bs=1k filename=file1:file2 [job11] ioengine=mmap rw=randwrite direct=1 filename=file1:file2 Signed-off-by: Lukas Czerner Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara Cc: stable@vger.kernel.org diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4ad73d3..2334e86 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1330,7 +1330,7 @@ static void ext4_da_page_release_reservation(struct page *page, unsigned int offset, unsigned int length) { - int to_release = 0; + int to_release = 0, contiguous_blks = 0; struct buffer_head *head, *bh; unsigned int curr_off = 0; struct inode *inode = page->mapping->host; @@ -1351,14 +1351,23 @@ static void ext4_da_page_release_reservation(struct page *page, if ((offset <= curr_off) && (buffer_delay(bh))) { to_release++; + contiguous_blks++; clear_buffer_delay(bh); + } else if (contiguous_blks) { + lblk = page->index << + (PAGE_CACHE_SHIFT - inode->i_blkbits); + lblk += (curr_off >> inode->i_blkbits) - + contiguous_blks; + ext4_es_remove_extent(inode, lblk, contiguous_blks); + contiguous_blks = 0; } curr_off = next_off; } while ((bh = bh->b_this_page) != head); - if (to_release) { + if (contiguous_blks) { lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - ext4_es_remove_extent(inode, lblk, to_release); + lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; + ext4_es_remove_extent(inode, lblk, contiguous_blks); } /* If we have released all the blocks belonging to a cluster, then we -- cgit v0.10.2 From d6f123a9297496ad0b6335fe881504c4b5b2a5e5 Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Fri, 3 Jul 2015 23:56:50 -0400 Subject: ext4: be more strict when migrating to non-extent based file Currently the check in ext4_ind_migrate() is not enough before doing the real conversion: a) delayed allocated extents could bypass the check on eh->eh_entries and eh->eh_depth This can be demonstrated by this script xfs_io -fc "pwrite 0 4k" -c "pwrite 8k 4k" /mnt/ext4/testfile chattr -e /mnt/ext4/testfile where testfile has two extents but still be converted to non-extent based file format. b) only extent length is checked but not the offset, which would result in data lose (delalloc) or fs corruption (nodelalloc), because non-extent based file only supports at most (12 + 2^10 + 2^20 + 2^30) blocks This can be demostrated by xfs_io -fc "pwrite 5T 4k" /mnt/ext4/testfile chattr -e /mnt/ext4/testfile sync If delalloc is enabled, dmesg prints EXT4-fs warning (device dm-4): ext4_block_to_path:105: block 1342177280 > max in inode 53 EXT4-fs (dm-4): Delayed block allocation failed for inode 53 at logical offset 1342177280 with max blocks 1 with error 5 EXT4-fs (dm-4): This should not happen!! Data will be lost If delalloc is disabled, e2fsck -nf shows corruption Inode 53, i_size is 5497558142976, should be 4096. Fix? 
no Fix the two issues by a) forcing all delayed allocation blocks to be allocated before checking eh->eh_depth and eh->eh_entries b) limiting the last logical block of the extent is within direct map Signed-off-by: Eryu Guan Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index b52374e..6d8b0c9 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode) struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent *ex; unsigned int i, len; + ext4_lblk_t end; ext4_fsblk_t blk; handle_t *handle; int ret; @@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode) EXT4_FEATURE_RO_COMPAT_BIGALLOC)) return -EOPNOTSUPP; + /* + * In order to get correct extent info, force all delayed allocation + * blocks to be allocated, otherwise delayed allocation blocks may not + * be reflected and bypass the checks on extent header. + */ + if (test_opt(inode->i_sb, DELALLOC)) + ext4_alloc_da_blocks(inode); + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -654,7 +663,8 @@ int ext4_ind_migrate(struct inode *inode) else { len = le16_to_cpu(ex->ee_len); blk = ext4_ext_pblock(ex); - if (len > EXT4_NDIR_BLOCKS) { + end = le32_to_cpu(ex->ee_block) + len - 1; + if (end >= EXT4_NDIR_BLOCKS) { ret = -EOPNOTSUPP; goto errout; } -- cgit v0.10.2 From 8974fec7d72e3e02752fe0f27b4c3719c78d9a15 Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Sat, 4 Jul 2015 00:03:44 -0400 Subject: ext4: correctly migrate a file with a hole at the beginning Currently ext4_ind_migrate() doesn't correctly handle a file which contains a hole at the beginning of the file. This caused the migration to be done incorrectly, and then if there is a subsequent following delayed allocation write to the "hole", this would reclaim the same data blocks again and results in fs corruption. # assmuing 4k block size ext4, with delalloc enabled # skip the first block and write to the second block xfs_io -fc "pwrite 4k 4k" -c "fsync" /mnt/ext4/testfile # converting to indirect-mapped file, which would move the data blocks # to the beginning of the file, but extent status cache still marks # that region as a hole chattr -e /mnt/ext4/testfile # delayed allocation writes to the "hole", reclaim the same data block # again, results in i_blocks corruption xfs_io -c "pwrite 0 4k" /mnt/ext4/testfile umount /mnt/ext4 e2fsck -nf /dev/sda6 ... Inode 53, i_blocks is 16, should be 8. Fix? no ... 
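In terms of the block map, the reproducer boils down to a single-block extent at logical block 1 that must land in i_data[1], leaving i_data[0] as the hole. A toy model of the corrected copy (standalone types; the on-disk endianness conversion is elided):

#include <linux/types.h>

static void fill_ind_map(u32 *i_data, u32 ee_block, u16 ee_len, u32 blk)
{
	u32 start = ee_block;			/* 1 in the example */
	u32 end = start + ee_len - 1;		/* 1 in the example */
	u32 i;

	for (i = start; i <= end; i++)		/* writes i_data[1] only */
		i_data[i] = blk++;
	/* The pre-fix loop ran i = 0 .. ee_len - 1 and wrote i_data[0],
	 * which is how the delalloc write to offset 0 could grab the
	 * same block a second time. */
}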
Signed-off-by: Eryu Guan Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 6d8b0c9..6163ad2 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -620,7 +620,7 @@ int ext4_ind_migrate(struct inode *inode) struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent *ex; unsigned int i, len; - ext4_lblk_t end; + ext4_lblk_t start, end; ext4_fsblk_t blk; handle_t *handle; int ret; @@ -659,11 +659,12 @@ int ext4_ind_migrate(struct inode *inode) goto errout; } if (eh->eh_entries == 0) - blk = len = 0; + blk = len = start = end = 0; else { len = le16_to_cpu(ex->ee_len); blk = ext4_ext_pblock(ex); - end = le32_to_cpu(ex->ee_block) + len - 1; + start = le32_to_cpu(ex->ee_block); + end = start + len - 1; if (end >= EXT4_NDIR_BLOCKS) { ret = -EOPNOTSUPP; goto errout; @@ -672,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode) ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); memset(ei->i_data, 0, sizeof(ei->i_data)); - for (i=0; i < len; i++) + for (i = start; i <= end; i++) ei->i_data[i] = cpu_to_le32(blk++); ext4_mark_inode_dirty(handle, inode); errout: -- cgit v0.10.2 From 0484edadfae3202ab165a2bff4936b80bcf1bebf Mon Sep 17 00:00:00 2001 From: Tiberiu Breana Date: Wed, 1 Jul 2015 15:32:00 +0300 Subject: iio: light: STK3310: un-invert proximity values In accordance with the recent proximity ABI changes, STK3310's proximity readings should be un-inversed in order to return low values for far-away objects and high values for close ones. As consequences of this change, iio event directions have been switched and maximum proximity sensor reference values have also been adjusted in accordance with the real readings. Signed-off-by: Tiberiu Breana Acked-by: Hartmut Knaack Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index fee4297..c1a2182 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -43,7 +43,6 @@ #define STK3311_CHIP_ID_VAL 0x1D #define STK3310_PSINT_EN 0x01 #define STK3310_PS_MAX_VAL 0xFFFF -#define STK3310_THRESH_MAX 0xFFFF #define STK3310_DRIVER_NAME "stk3310" #define STK3310_REGMAP_NAME "stk3310_regmap" @@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint = REG_FIELD(STK3310_REG_FLAG, 4, 4); static const struct reg_field stk3310_reg_field_flag_nf = REG_FIELD(STK3310_REG_FLAG, 0, 0); -/* - * Maximum PS values with regard to scale. Used to export the 'inverse' - * PS value (high values for far objects, low values for near objects). - */ + +/* Estimate maximum proximity values with regard to measurement scale. 
*/ static const int stk3310_ps_max[4] = { - STK3310_PS_MAX_VAL / 64, - STK3310_PS_MAX_VAL / 16, - STK3310_PS_MAX_VAL / 4, - STK3310_PS_MAX_VAL, + STK3310_PS_MAX_VAL / 640, + STK3310_PS_MAX_VAL / 160, + STK3310_PS_MAX_VAL / 40, + STK3310_PS_MAX_VAL / 10 }; static const int stk3310_scale_table[][2] = { @@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = { /* Proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, /* Out-of-proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_RISING, + .dir = IIO_EV_DIR_FALLING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, @@ -205,23 +202,16 @@ static int stk3310_read_event(struct iio_dev *indio_dev, u8 reg; u16 buf; int ret; - unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); if (info != IIO_EV_INFO_VALUE) return -EINVAL; - /* - * Only proximity interrupts are implemented at the moment. - * Since we're inverting proximity values, the sensor's 'high' - * threshold will become our 'low' threshold, associated with - * 'near' events. Similarly, the sensor's 'low' threshold will - * be our 'high' threshold, associated with 'far' events. - */ + /* Only proximity interrupts are implemented at the moment. */ if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; @@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, dev_err(&data->client->dev, "register read failed\n"); return ret; } - regmap_field_read(data->reg_ps_gain, &index); - *val = swab16(stk3310_ps_max[index] - buf); + *val = swab16(buf); return IIO_VAL_INT; } @@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev, return -EINVAL; if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; - buf = swab16(stk3310_ps_max[index] - val); + buf = swab16(val); ret = regmap_bulk_write(data->regmap, reg, &buf, 2); if (ret < 0) dev_err(&client->dev, "failed to set PS threshold!\n"); @@ -334,14 +323,6 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, return ret; } *val = swab16(buf); - if (chan->type == IIO_PROXIMITY) { - /* - * Invert the proximity data so we return low values - * for close objects and high values for far ones. - */ - regmap_field_read(data->reg_ps_gain, &index); - *val = stk3310_ps_max[index] - *val; - } mutex_unlock(&data->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: @@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) } event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, - (dir ? IIO_EV_DIR_RISING : - IIO_EV_DIR_FALLING)); + (dir ? IIO_EV_DIR_FALLING : + IIO_EV_DIR_RISING)); iio_push_event(indio_dev, event, data->timestamp); /* Reset the interrupt flag */ -- cgit v0.10.2 From 5d6e834ac440ce6736b08880503c107d9dc3f320 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Sat, 4 Jul 2015 17:56:57 +0200 Subject: iio:light:stk3310: Fix REGMAP_I2C dependency The stk3310 driver makes use of regmap_i2c, so this dependency needs to be reflected in Kconfig. 
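The dependency comes from the driver's probe path, sketched below with presumed details (the regmap_config contents and the probe shape are illustrative, not copied from the driver): initialising the register map over the I2C bus is what requires REGMAP_I2C.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static int example_probe(struct i2c_client *client)
{
	static const struct regmap_config cfg = {
		.reg_bits = 8,	/* illustrative geometry */
		.val_bits = 8,
	};
	struct regmap *regmap = devm_regmap_init_i2c(client, &cfg);

	if (IS_ERR(regmap))	/* this call is what pulls in the REGMAP_I2C dependency */
		return PTR_ERR(regmap);

	return 0;
}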
Signed-off-by: Hartmut Knaack Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index e6198b7..df995a4 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -201,6 +201,7 @@ config LTR501 config STK3310 tristate "STK3310 ALS and proximity sensor" depends on I2C + select REGMAP_I2C help Say yes here to get support for the Sensortek STK3310 ambient light and proximity sensor. The STK3311 model is also supported by this -- cgit v0.10.2 From 2616dfa1d3061b02c3cbce60bc1a59281eccae31 Mon Sep 17 00:00:00 2001 From: Teodora Baluta Date: Mon, 29 Jun 2015 15:44:50 +0300 Subject: iio: magnetometer: mmc35240: fix available sampling frequencies Fix the sampling frequencies according to the datasheet (page 8). The datasheet specifies the following available frequencies for continuous mode: 1.5 Hz, 13 Hz, 25 Hz, and 50 Hz. Also fix comments about the ODR to comply with datasheet. Signed-off-by: Teodora Baluta Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index 7a2ea71..d927397 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -84,10 +84,10 @@ #define MMC35240_OTP_START_ADDR 0x1B enum mmc35240_resolution { - MMC35240_16_BITS_SLOW = 0, /* 100 Hz */ - MMC35240_16_BITS_FAST, /* 200 Hz */ - MMC35240_14_BITS, /* 333 Hz */ - MMC35240_12_BITS, /* 666 Hz */ + MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */ + MMC35240_16_BITS_FAST, /* 4.08 ms */ + MMC35240_14_BITS, /* 2.16 ms */ + MMC35240_12_BITS, /* 1.20 ms */ }; enum mmc35240_axis { @@ -100,22 +100,22 @@ static const struct { int sens[3]; /* sensitivity per X, Y, Z axis */ int nfo; /* null field output */ } mmc35240_props_table[] = { - /* 16 bits, 100Hz ODR */ + /* 16 bits, 125Hz ODR */ { {1024, 1024, 1024}, 32768, }, - /* 16 bits, 200Hz ODR */ + /* 16 bits, 250Hz ODR */ { {1024, 1024, 770}, 32768, }, - /* 14 bits, 333Hz ODR */ + /* 14 bits, 450Hz ODR */ { {256, 256, 193}, 8192, }, - /* 12 bits, 666Hz ODR */ + /* 12 bits, 800Hz ODR */ { {64, 64, 48}, 2048, @@ -133,9 +133,15 @@ struct mmc35240_data { int axis_scale[3]; }; -static const int mmc35240_samp_freq[] = {100, 200, 333, 666}; +static const struct { + int val; + int val2; +} mmc35240_samp_freq[] = { {1, 500000}, + {13, 0}, + {25, 0}, + {50, 0} }; -static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666"); +static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50"); #define MMC35240_CHANNEL(_axis) { \ .type = IIO_MAGN, \ @@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data, int i; for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++) - if (mmc35240_samp_freq[i] == val) + if (mmc35240_samp_freq[i].val == val && + mmc35240_samp_freq[i].val2 == val2) return i; return -EINVAL; } @@ -378,9 +385,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev, if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq)) return -EINVAL; - *val = mmc35240_samp_freq[i]; - *val2 = 0; - return IIO_VAL_INT; + *val = mmc35240_samp_freq[i].val; + *val2 = mmc35240_samp_freq[i].val2; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } -- cgit v0.10.2 From 657c7ff56f8e96d4de7146103f08f3316dd822df Mon Sep 17 00:00:00 2001 From: Vlad Dogaru Date: Tue, 30 Jun 2015 14:20:58 +0300 Subject: iio: sx9500: rework error handling of raw readings Fix error handling so that we can power the chip down even if a raw read fails. 
Reported-by: Hartmut Knaack Signed-off-by: Vlad Dogaru Acked-by: Hartmut Knaack Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 2042e37..ba1cbbe 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -329,27 +329,29 @@ static int sx9500_read_proximity(struct sx9500_data *data, else ret = sx9500_wait_for_sample(data); - if (ret < 0) - return ret; - mutex_lock(&data->mutex); - ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; - ret = sx9500_dec_chan_users(data, chan->channel); + ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; ret = sx9500_dec_data_rdy_users(data); if (ret < 0) + goto out_dec_chan; + + ret = sx9500_dec_chan_users(data, chan->channel); + if (ret < 0) goto out; ret = IIO_VAL_INT; goto out; +out_dec_data_rdy: + sx9500_dec_data_rdy_users(data); out_dec_chan: sx9500_dec_chan_users(data, chan->channel); out: -- cgit v0.10.2 From 68958bd5ca748beba1251937d6f6561e70e97c14 Mon Sep 17 00:00:00 2001 From: Vlad Dogaru Date: Tue, 30 Jun 2015 14:20:59 +0300 Subject: iio: sx9500: fix bug in compensation code The initial compensation was mistakingly toggling an extra bit in the control register. Fix this and make sure it's less likely to happen by introducing an additional macro. Reported-by: Hartmut Knaack Signed-off-by: Vlad Dogaru Acked-by: Hartmut Knaack Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index ba1cbbe..798b973 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -80,6 +80,7 @@ #define SX9500_COMPSTAT_MASK GENMASK(3, 0) #define SX9500_NUM_CHANNELS 4 +#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0) struct sx9500_data { struct mutex mutex; @@ -802,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) unsigned int val; ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), - GENMASK(SX9500_NUM_CHANNELS, 0)); + SX9500_CHAN_MASK, SX9500_CHAN_MASK); if (ret < 0) return ret; @@ -823,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) out: regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), 0); + SX9500_CHAN_MASK, 0); return ret; } -- cgit v0.10.2 From 5919a0839bd65252f7306a0ee4879e697e00cab1 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Sun, 28 Jun 2015 12:31:53 +0200 Subject: iio:light:ltr501: fix variable in ltr501_init When filling data->als_contr, the register content read into status needs to be used, instead of the return status value of regmap_read. 
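For clarity, the regmap_read() contract the fix relies on, shown as a small hypothetical helper (the helper and its arguments are made up): the return value is only a 0/-errno status, while the register content comes back through the third argument -- so the value to keep is 'status', not 'ret'.

#include <linux/regmap.h>

static int read_and_or(struct regmap *map, unsigned int reg,
		       unsigned int or_mask, unsigned int *out)
{
	unsigned int status;
	int ret = regmap_read(map, reg, &status);

	if (ret < 0)
		return ret;		/* never the register value */

	*out = status | or_mask;	/* use 'status', not 'ret' */
	return 0;
}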
Fixes: 8592a7eefa540 ("iio: ltr501: Add support for ltr559 chip") Signed-off-by: Hartmut Knaack Acked-by: Daniel Baluta Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 1ef7d37..b5a0e66 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data) if (ret < 0) return ret; - data->als_contr = ret | data->chip_info->als_mode_active; + data->als_contr = status | data->chip_info->als_mode_active; ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status); if (ret < 0) -- cgit v0.10.2 From 5d9fc0f63f561e7fa4da0883a4d0bd8e6a8b5de6 Mon Sep 17 00:00:00 2001 From: Hartmut Knaack Date: Sun, 28 Jun 2015 12:31:52 +0200 Subject: iio:light:ltr501: fix regmap dependency The use of regmap in commit 2f2c96338afc requires REGMAP_I2C to be selected, in order to meet all dependencies. Fixes: 2f2c96338afc ("iio: ltr501: Add regmap support.") Signed-off-by: Hartmut Knaack Acked-by: Daniel Baluta Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index df995a4..a5c5925 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -188,6 +188,7 @@ config SENSORS_LM3533 config LTR501 tristate "LTR-501ALS-01 light sensor" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help -- cgit v0.10.2 From 897993fecbb8e6292aaa4079815f0793dcf6de8b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 29 Jun 2015 09:14:33 +0200 Subject: iio: sx9500: Add missing init in sx9500_buffer_pre{en,dis}able() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/iio/proximity/sx9500.c: In function ‘sx9500_buffer_preenable’: drivers/iio/proximity/sx9500.c:682: warning: ‘ret’ may be used uninitialized in this function drivers/iio/proximity/sx9500.c: In function ‘sx9500_buffer_predisable’: drivers/iio/proximity/sx9500.c:706: warning: ‘ret’ may be used uninitialized in this function If active_scan_mask is empty, it will loop once more over all channels, doing nothing. Signed-off-by: Geert Uytterhoeven Reviewed-by: Vlad Dogaru Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 798b973..42072e0 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -682,7 +682,7 @@ out: static int sx9500_buffer_preenable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; mutex_lock(&data->mutex); @@ -706,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev) static int sx9500_buffer_predisable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; iio_triggered_buffer_predisable(indio_dev); -- cgit v0.10.2 From 8d05abfaeff52bdf66aba3a3a337dcdbdb4911bf Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Sun, 21 Jun 2015 23:50:21 +0200 Subject: iio: tmp006: Check channel info on write only SAMP_FREQ is writable Will lead to SAMP_FREQ being written by any attempt to write to the other exported attributes and hence a rather unexpected result! 
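In IIO terms, write_raw() receives the info-mask element being written, so a driver with a single writable element should reject everything else up front -- which is what the hunk below adds. A condensed sketch of the resulting shape (the function is renamed to mark it as a sketch, and the frequency-table lookup is elided):

#include <linux/errno.h>
#include <linux/iio/iio.h>

static int tmp006_write_raw_sketch(struct iio_dev *indio_dev,
				   struct iio_chan_spec const *chan,
				   int val, int val2, long mask)
{
	if (mask != IIO_CHAN_INFO_SAMP_FREQ)
		return -EINVAL;	/* writes to other attributes no longer touch the chip */

	/* ... match {val, val2} against tmp006_freqs[] and program the device ... */
	return 0;
}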
Signed-off-by: Peter Meerwald Cc: Signed-off-by: Jonathan Cameron diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c index fcc49f8..8f21f32 100644 --- a/drivers/iio/temperature/tmp006.c +++ b/drivers/iio/temperature/tmp006.c @@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev, struct tmp006_data *data = iio_priv(indio_dev); int i; + if (mask != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++) if ((val == tmp006_freqs[i][0]) && (val2 == tmp006_freqs[i][1])) { -- cgit v0.10.2 From d90a45b1e005da3351de795d2ba6d52dc5a28837 Mon Sep 17 00:00:00 2001 From: Jens Kuske Date: Fri, 15 May 2015 18:38:51 +0200 Subject: Documentation: sunxi: Update Allwinner SoC documentation There are some new Allwinner SoCs available, namely A33, A83T and H3. Update the documentation to mention those and the related documents. Signed-off-by: Jens Kuske Signed-off-by: Maxime Ripard diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README index 1fe2d7f..5e38e15 100644 --- a/Documentation/arm/sunxi/README +++ b/Documentation/arm/sunxi/README @@ -36,7 +36,7 @@ SunXi family + User Manual http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf - - Allwinner A23 + - Allwinner A23 (sun8i) + Datasheet http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf + User Manual @@ -55,7 +55,23 @@ SunXi family + User Manual http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf + - Allwinner A33 (sun8i) + + Datasheet + http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf + + User Manual + http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf + + - Allwinner H3 (sun8i) + + Datasheet + http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf + * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs - Allwinner A80 + Datasheet http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf + + * Octa ARM Cortex-A7 based SoCs + - Allwinner A83T + + Not Supported + + Datasheet + http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf -- cgit v0.10.2 From 14a882df14a5ae859b245bc708ce3fce47a91594 Mon Sep 17 00:00:00 2001 From: Jens Kuske Date: Fri, 15 May 2015 18:38:55 +0200 Subject: ARM: sunxi: Introduce Allwinner H3 support The Allwinner H3 is a quad-core Cortex-A7-based SoC. It is very similar to other sun8i family SoCs like the A23. 
Signed-off-by: Jens Kuske Signed-off-by: Maxime Ripard diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt index 42941fd..3cb4b94 100644 --- a/Documentation/devicetree/bindings/arm/sunxi.txt +++ b/Documentation/devicetree/bindings/arm/sunxi.txt @@ -9,4 +9,5 @@ using one of the following compatible strings: allwinner,sun6i-a31 allwinner,sun7i-a20 allwinner,sun8i-a23 + allwinner,sun8i-h3 allwinner,sun9i-a80 diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index 81502b9..4efe2d4 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -35,7 +35,7 @@ config MACH_SUN7I select SUN5I_HSTIMER config MACH_SUN8I - bool "Allwinner A23 (sun8i) SoCs support" + bool "Allwinner sun8i Family SoCs support" default ARCH_SUNXI select ARM_GIC select MFD_SUN6I_PRCM diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 1bc811a..8270902 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -67,10 +67,12 @@ MACHINE_END static const char * const sun8i_board_dt_compat[] = { "allwinner,sun8i-a23", + "allwinner,sun8i-h3", NULL, }; -DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family") +DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family") + .init_time = sun6i_timer_init, .dt_compat = sun8i_board_dt_compat, .init_late = sunxi_dt_cpufreq_init, MACHINE_END -- cgit v0.10.2 From 159870d2413c92622790e9cecbce95099bed539a Mon Sep 17 00:00:00 2001 From: Vishnu Patekar Date: Sat, 30 May 2015 16:55:01 +0200 Subject: ARM: sunxi: Add Machine support for A33 Add machine support for the Allwinner A33 quad core cortex-a7 based SoC, which is similar to the A23 SoC. Signed-off-by: Vishnu Patekar Signed-off-by: Hans de Goede Signed-off-by: Maxime Ripard Tested-by: Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt index 3cb4b94..67da205 100644 --- a/Documentation/devicetree/bindings/arm/sunxi.txt +++ b/Documentation/devicetree/bindings/arm/sunxi.txt @@ -9,5 +9,6 @@ using one of the following compatible strings: allwinner,sun6i-a31 allwinner,sun7i-a20 allwinner,sun8i-a23 + allwinner,sun8i-a33 allwinner,sun8i-h3 allwinner,sun9i-a80 diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 8270902..65bab28 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -67,6 +67,7 @@ MACHINE_END static const char * const sun8i_board_dt_compat[] = { "allwinner,sun8i-a23", + "allwinner,sun8i-a33", "allwinner,sun8i-h3", NULL, }; diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 7e1e2bd..6d25e4e 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -1389,6 +1389,7 @@ static void __init sun6i_init_clocks(struct device_node *node) CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks); CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks); CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks); +CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks); static void __init sun9i_init_clocks(struct device_node *node) { -- cgit v0.10.2 From 07d1f344159764bcd4e228e4a65e804dfe59dfe9 Mon Sep 17 00:00:00 2001 From: Timo Sigurdsson Date: Mon, 6 Apr 2015 22:57:22 +0200 Subject: ARM: Remove deprecated symbol from defconfig files Commit b2b3a8b934e6 ("power/reset: Remove sun6i reboot driver") removed the sun6i reboot driver. 
But sunxi_defconfig and multi_v7_defconfig still contain the symbol CONFIG_POWER_RESET_SUN6I that was deprecated by that commit, so remove it. Signed-off-by: Timo Sigurdsson Signed-off-by: Maxime Ripard diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index ab86655..22df002 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -319,7 +319,6 @@ CONFIG_POWER_RESET_AS3722=y CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_POWER_RESET_KEYSTONE=y -CONFIG_POWER_RESET_SUN6I=y CONFIG_POWER_RESET_RMOBILE=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 8ecba00..2c7af0a 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -77,7 +77,6 @@ CONFIG_SPI_SUN6I=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_SUN6I=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y CONFIG_WATCHDOG=y -- cgit v0.10.2 From 5738563bf6f2f1631dc71a35293b135f90969a35 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 20 Apr 2015 21:48:40 +0200 Subject: ARM: sunxi: Enable simplefb in the defconfig Now that we have simplefb support, we can enable it in our defconfig. Also enable the framebuffer console, so that we are sure that we actually get something displayed in any case. And while we're at it, enable the module support. Signed-off-by: Maxime Ripard diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 2c7af0a..7ebc346b 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -2,6 +2,7 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y +CONFIG_MODULES=y CONFIG_ARCH_SUNXI=y CONFIG_SMP=y CONFIG_NR_CPUS=8 @@ -86,6 +87,10 @@ CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_GPIO=y +CONFIG_FB=y +CONFIG_FB_SIMPLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y -- cgit v0.10.2 From 7444a072c387a93ebee7066e8aee776954ab0e41 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Sun, 5 Jul 2015 12:33:44 -0400 Subject: ext4: replace open coded nofail allocation in ext4_free_blocks() ext4_free_blocks is looping around the allocation request and mimics __GFP_NOFAIL behavior without any allocation fallback strategy. Let's remove the open coded loop and replace it with __GFP_NOFAIL. Without the flag the allocator has no way to find out never-fail requirement and cannot help in any way. Signed-off-by: Michal Hocko Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 1c535fa..2299d62 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4815,18 +4815,12 @@ do_more: /* * blocks being freed are metadata. these blocks shouldn't * be used until this transaction is committed + * + * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed + * to fail. */ - retry: - new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); - if (!new_entry) { - /* - * We use a retry loop because - * ext4_free_blocks() is not allowed to fail. 
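The same shift applies to any allocation path that is not allowed to fail; a condensed sketch with generic names (foo and foo_cachep are placeholders, not ext4 symbols):

        struct foo *obj;

        /* before: an open-coded never-fail loop the allocator cannot see through */
        do {
                obj = kmem_cache_alloc(foo_cachep, GFP_NOFS);
                if (!obj) {
                        cond_resched();
                        congestion_wait(BLK_RW_ASYNC, HZ / 50);
                }
        } while (!obj);

        /* after: the never-fail constraint is visible to the allocator */
        obj = kmem_cache_alloc(foo_cachep, GFP_NOFS | __GFP_NOFAIL);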
- */ - cond_resched(); - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry; - } + new_entry = kmem_cache_alloc(ext4_free_data_cachep, + GFP_NOFS|__GFP_NOFAIL); new_entry->efd_start_cluster = bit; new_entry->efd_group = block_group; new_entry->efd_count = count_clusters; -- cgit v0.10.2 From 728abda6a6654ee7f4e903dc921c6307065e1644 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 5 Jul 2015 14:12:42 -0300 Subject: tools: Adopt {READ,WRITE_ONCE} from the kernel We need it to build rbtree.c after this cset: commit d72da4a4d973 Author: Peter Zijlstra Date: Wed May 27 11:09:36 2015 +0930 rbtree: Make lockless searches non-fatal Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-qlnzhezv5ddwst0w9fydju0y@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index f0e7267..9098083 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -41,4 +41,62 @@ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#include + +static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; + default: + barrier(); + __builtin_memcpy((void *)res, (const void *)p, size); + barrier(); + } +} + +static __always_inline void __write_once_size(volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(volatile __u8 *)p = *(__u8 *)res; break; + case 2: *(volatile __u16 *)p = *(__u16 *)res; break; + case 4: *(volatile __u32 *)p = *(__u32 *)res; break; + case 8: *(volatile __u64 *)p = *(__u64 *)res; break; + default: + barrier(); + __builtin_memcpy((void *)p, (const void *)res, size); + barrier(); + } +} + +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the + * compiler is aware of some particular ordering. One way to make the + * compiler aware of ordering is to put the two invocations of READ_ONCE, + * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * + * In contrast to ACCESS_ONCE these two macros will also work on aggregate + * data types like structs or unions. If the size of the accessed data + * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a + * compile-time warning. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. 
+ */ + +#define READ_ONCE(x) \ + ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) + +#define WRITE_ONCE(x, val) \ + ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) + #endif /* _TOOLS_LINUX_COMPILER_H */ -- cgit v0.10.2 From 4407f967441aa1adfc11f739e8e9ec0f38fa839f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 5 Jul 2015 14:17:04 -0300 Subject: perf tools: Copy rbtree.h from the kernel We were using the include/linux/rbtree.h directly from the kernel, which broke the build as soon as it started using rcupdate.h, to avoid dragging the rcu header files into tools/, for which there is no use so far, grab a copy of rbtree.h. This is the minimal fix, later patches will copy as well lib/rbtree.c and move rbtree.h into tools/include/, etc. Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-dfmuj0j63w4by7vhlh4hhn74@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index fe50a1b..6504b72 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -51,7 +51,6 @@ include/asm-generic/bitops/fls64.h include/asm-generic/bitops/__fls.h include/asm-generic/bitops/fls.h include/linux/perf_event.h -include/linux/rbtree.h include/linux/list.h include/linux/hash.h include/linux/stringify.h diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h index f06d89f..1125822 100644 --- a/tools/perf/util/include/linux/rbtree.h +++ b/tools/perf/util/include/linux/rbtree.h @@ -1,7 +1,95 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/rbtree.txt for documentation and samples. 
+*/ + #ifndef __TOOLS_LINUX_PERF_RBTREE_H #define __TOOLS_LINUX_PERF_RBTREE_H -#include -#include "../../../../include/linux/rbtree.h" + +#include +#include + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ +#define RB_EMPTY_NODE(node) \ + ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) \ + ((node)->__rb_parent_color = (unsigned long)(node)) + + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Postorder iteration - always visit the parent after its children */ +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? rb_entry(____ptr, type, member) : NULL; \ + }) + /* * Handy for checking that we are not deleting an entry that is -- cgit v0.10.2 From 4c7e309340ff85072e96f529582d159002c36734 Mon Sep 17 00:00:00 2001 From: Dennis Yang Date: Fri, 26 Jun 2015 15:25:48 +0100 Subject: dm btree remove: fix bug in redistribute3 redistribute3() shares entries out across 3 nodes. Some entries were being moved the wrong way, breaking the ordering. This manifested as a BUG() in dm-btree-remove.c:shift() when entries were removed from the btree. 
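A condensed sketch of the corrected branch (the surrounding setup is paraphrased; shift(a, b, n) is taken to move n entries from a to b when n is positive and from b to a when n is negative):

        s = nr_left - target;                   /* s < 0: left is short of entries */
        if (s < 0 && nr_center < -s) {
                /* the centre alone cannot cover the shortfall: drain it into left */
                shift(left, center, -nr_center);
                s += nr_center;                 /* shortfall still outstanding */
                shift(left, right, s);          /* pull the remainder from the right */
                nr_right += s;
        }

The old code pushed entries from left into the centre instead and recomputed s from scratch, which is what broke the key ordering.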
For additional context see: https://www.redhat.com/archives/dm-devel/2015-May/msg00113.html Signed-off-by: Dennis Yang Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index e04cfd2..9836c0a 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s < 0 && nr_center < -s) { /* not enough in central node */ - shift(left, center, nr_center); - s = nr_center - target; + shift(left, center, -nr_center); + s += nr_center; shift(left, right, s); nr_right += s; } else @@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s > 0 && nr_center < s) { /* not enough in central node */ shift(center, right, nr_center); - s = target - nr_center; + s -= nr_center; shift(left, right, s); nr_left -= s; } else -- cgit v0.10.2 From 3f735377bfd6567d80815a6242c147211963680a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 5 Jul 2015 22:48:21 -0300 Subject: tools: Copy lib/rbtree.c to tools/lib/ So that we can remove kernel specific stuff we've been stubbing out via a tools/include/linux/export.h that gets removed in this patch and to avoid breakages in the future like the one fixed recently where rcupdate.h started being used in rbtree.h. Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-rxuzfsozpb8hv1emwpx06rm6@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h deleted file mode 100644 index d07e586..0000000 --- a/tools/include/linux/export.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _TOOLS_LINUX_EXPORT_H_ -#define _TOOLS_LINUX_EXPORT_H_ - -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#define EXPORT_SYMBOL_GPL_FUTURE(sym) -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) - -#endif diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c new file mode 100644 index 0000000..17c2b59 --- /dev/null +++ b/tools/lib/rbtree.c @@ -0,0 +1,548 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/lib/rbtree.c +*/ + +#include + +/* + * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree + * + * 1) A node is either red or black + * 2) The root is black + * 3) All leaves (NULL) are black + * 4) Both children of every red node are black + * 5) Every simple path from root to leaves contains the same number + * of black nodes. 
+ * + * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two + * consecutive red nodes in a path and every red node is therefore followed by + * a black. So if B is the number of black nodes on every simple path (as per + * 5), then the longest possible path due to 4 is 2B. + * + * We shall indicate color with case, where black nodes are uppercase and red + * nodes will be lowercase. Unknown color nodes shall be drawn as red within + * parentheses and have some accompanying text comment. + */ + +static inline void rb_set_black(struct rb_node *rb) +{ + rb->__rb_parent_color |= RB_BLACK; +} + +static inline struct rb_node *rb_red_parent(struct rb_node *red) +{ + return (struct rb_node *)red->__rb_parent_color; +} + +/* + * Helper function for rotations: + * - old's parent and color get assigned to new + * - old gets assigned new as a parent and 'color' as a color. + */ +static inline void +__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, + struct rb_root *root, int color) +{ + struct rb_node *parent = rb_parent(old); + new->__rb_parent_color = old->__rb_parent_color; + rb_set_parent_color(old, new, color); + __rb_change_child(old, new, parent, root); +} + +static __always_inline void +__rb_insert(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; + + while (true) { + /* + * Loop invariant: node is red + * + * If there is a black parent, we are done. + * Otherwise, take some corrective action as we don't + * want a red root or two consecutive red nodes. + */ + if (!parent) { + rb_set_parent_color(node, NULL, RB_BLACK); + break; + } else if (rb_is_black(parent)) + break; + + gparent = rb_red_parent(parent); + + tmp = gparent->rb_right; + if (parent != tmp) { /* parent == gparent->rb_left */ + if (tmp && rb_is_red(tmp)) { + /* + * Case 1 - color flips + * + * G g + * / \ / \ + * p u --> P U + * / / + * n n + * + * However, since g's parent might be red, and + * 4) does not allow this, we need to recurse + * at g. + */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_right; + if (node == tmp) { + /* + * Case 2 - left rotate at parent + * + * G G + * / \ / \ + * p U --> n U + * \ / + * n p + * + * This still leaves us in violation of 4), the + * continuation into Case 3 will fix that. 
+ */ + parent->rb_right = tmp = node->rb_left; + node->rb_left = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_right; + } + + /* + * Case 3 - right rotate at gparent + * + * G P + * / \ / \ + * p U --> n g + * / \ + * n U + */ + gparent->rb_left = tmp; /* == parent->rb_right */ + parent->rb_right = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } else { + tmp = gparent->rb_left; + if (tmp && rb_is_red(tmp)) { + /* Case 1 - color flips */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_left; + if (node == tmp) { + /* Case 2 - right rotate at parent */ + parent->rb_left = tmp = node->rb_right; + node->rb_right = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_left; + } + + /* Case 3 - left rotate at gparent */ + gparent->rb_right = tmp; /* == parent->rb_left */ + parent->rb_left = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } + } +} + +/* + * Inline version for rb_erase() use - we want to be able to inline + * and eliminate the dummy_rotate callback there + */ +static __always_inline void +____rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; + + while (true) { + /* + * Loop invariants: + * - node is black (or NULL on first iteration) + * - node is not the root (parent is not NULL) + * - All leaf paths going through parent and node have a + * black node count that is 1 lower than other leaf paths. + */ + sibling = parent->rb_right; + if (node != sibling) { /* node == parent->rb_left */ + if (rb_is_red(sibling)) { + /* + * Case 1 - left rotate at parent + * + * P S + * / \ / \ + * N s --> p Sr + * / \ / \ + * Sl Sr N Sl + */ + parent->rb_right = tmp1 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_right; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_left; + if (!tmp2 || rb_is_black(tmp2)) { + /* + * Case 2 - sibling color flip + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N s + * / \ / \ + * Sl Sr Sl Sr + * + * This leaves us violating 5) which + * can be fixed by flipping p to black + * if it was red, or by recursing at p. + * p is red when coming from Case 1. 
+ */ + rb_set_parent_color(sibling, parent, + RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* + * Case 3 - right rotate at sibling + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N Sl + * / \ \ + * sl Sr s + * \ + * Sr + */ + sibling->rb_left = tmp1 = tmp2->rb_right; + tmp2->rb_right = sibling; + parent->rb_right = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* + * Case 4 - left rotate at parent + color flips + * (p and sl could be either color here. + * After rotation, p becomes black, s acquires + * p's color, and sl keeps its color) + * + * (p) (s) + * / \ / \ + * N S --> P Sr + * / \ / \ + * (sl) sr N (sl) + */ + parent->rb_right = tmp2 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); + augment_rotate(parent, sibling); + break; + } else { + sibling = parent->rb_left; + if (rb_is_red(sibling)) { + /* Case 1 - right rotate at parent */ + parent->rb_left = tmp1 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_left; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_right; + if (!tmp2 || rb_is_black(tmp2)) { + /* Case 2 - sibling color flip */ + rb_set_parent_color(sibling, parent, + RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* Case 3 - right rotate at sibling */ + sibling->rb_right = tmp1 = tmp2->rb_left; + tmp2->rb_left = sibling; + parent->rb_left = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* Case 4 - left rotate at parent + color flips */ + parent->rb_left = tmp2 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); + augment_rotate(parent, sibling); + break; + } + } +} + +/* Non-inline version for rb_erase_augmented() use */ +void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + ____rb_erase_color(parent, root, augment_rotate); +} + +/* + * Non-augmented rbtree manipulation functions. + * + * We use dummy augmented callbacks here, and have the compiler optimize them + * out of the rb_insert_color() and rb_erase() function definitions. 
+ */ + +static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} +static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} +static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} + +static const struct rb_augment_callbacks dummy_callbacks = { + dummy_propagate, dummy_copy, dummy_rotate +}; + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + __rb_insert(node, root, dummy_rotate); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *rebalance; + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); + if (rebalance) + ____rb_erase_color(rebalance, root, dummy_rotate); +} + +/* + * Augmented rbtree manipulation functions. + * + * This instantiates the same __always_inline functions as in the non-augmented + * case, but this time with user-defined callbacks. + */ + +void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + __rb_insert(node, root, augment_rotate); +} + +/* + * This function returns the first node (in sort order) of the tree. + */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a right-hand child, go down and then left as far + * as we can. + */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) + node=node->rb_left; + return (struct rb_node *)node; + } + + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ + while ((parent = rb_parent(node)) && node == parent->rb_right) + node = parent; + + return parent; +} + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) + node=node->rb_right; + return (struct rb_node *)node; + } + + /* + * No left-hand children. Go up till we find an ancestor which + * is a right-hand child of its parent. 
+ */ + while ((parent = rb_parent(node)) && node == parent->rb_left) + node = parent; + + return parent; +} + +void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root) +{ + struct rb_node *parent = rb_parent(victim); + + /* Set the surrounding nodes to point to the replacement */ + __rb_change_child(victim, new, parent, root); + if (victim->rb_left) + rb_set_parent(victim->rb_left, new); + if (victim->rb_right) + rb_set_parent(victim->rb_right, new); + + /* Copy the pointers/colour from the victim to the replacement */ + *new = *victim; +} + +static struct rb_node *rb_left_deepest_node(const struct rb_node *node) +{ + for (;;) { + if (node->rb_left) + node = node->rb_left; + else if (node->rb_right) + node = node->rb_right; + else + return (struct rb_node *)node; + } +} + +struct rb_node *rb_next_postorder(const struct rb_node *node) +{ + const struct rb_node *parent; + if (!node) + return NULL; + parent = rb_parent(node); + + /* If we're sitting on node, we've already seen our children */ + if (parent && node == parent->rb_left && parent->rb_right) { + /* If we are the parent's left node, go to the parent's right + * node then all the way down to the left */ + return rb_left_deepest_node(parent->rb_right); + } else + /* Otherwise we are the parent's right node, and the parent + * should be next */ + return (struct rb_node *)parent; +} + +struct rb_node *rb_first_postorder(const struct rb_root *root) +{ + if (!root->rb_node) + return NULL; + + return rb_left_deepest_node(root->rb_node); +} diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 6504b72..ff667e3 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h tools/arch/x86/include/asm/rmwcc.h tools/lib/traceevent tools/lib/api +tools/lib/rbtree.c tools/lib/symbol/kallsyms.c tools/lib/symbol/kallsyms.h tools/lib/util/find_next_bit.c @@ -55,7 +56,6 @@ include/linux/list.h include/linux/hash.h include/linux/stringify.h lib/hweight.c -lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h arch/*/include/uapi/asm/unistd*.h diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 586a59d..601d114 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) -$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE +$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) -- cgit v0.10.2 From 03da23a34a039a74f550646e68d562655306e331 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 5 Jul 2015 22:54:01 -0300 Subject: tools: Move rbtree.h from tools/perf/ The previous step, copying the contents minus the rcupdate.h parts, was done as a minimal fix, now do the move from tools/perf/. 
Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-52fllxtsgmtke66pmv98mcma@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h new file mode 100644 index 0000000..1125822 --- /dev/null +++ b/tools/include/linux/rbtree.h @@ -0,0 +1,104 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/rbtree.txt for documentation and samples. +*/ + +#ifndef __TOOLS_LINUX_PERF_RBTREE_H +#define __TOOLS_LINUX_PERF_RBTREE_H + +#include +#include + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ +#define RB_EMPTY_NODE(node) \ + ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) \ + ((node)->__rb_parent_color = (unsigned long)(node)) + + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Postorder iteration - always visit the parent after its children */ +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
rb_entry(____ptr, type, member) : NULL; \ + }) + + +/* + * Handy for checking that we are not deleting an entry that is + * already in a list, found in block/{blk-throttle,cfq-iosched}.c, + * probably should be moved to lib/rbtree.c... + */ +static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} +#endif /* __TOOLS_LINUX_PERF_RBTREE_H */ diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index ff667e3..9df2681 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -45,6 +45,7 @@ tools/include/linux/kernel.h tools/include/linux/list.h tools/include/linux/log2.h tools/include/linux/poison.h +tools/include/linux/rbtree.h tools/include/linux/types.h include/asm-generic/bitops/arch_hweight.h include/asm-generic/bitops/const_hweight.h diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h deleted file mode 100644 index 1125822..0000000 --- a/tools/perf/util/include/linux/rbtree.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - Red Black Trees - (C) 1999 Andrea Arcangeli - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - linux/include/linux/rbtree.h - - To use rbtrees you'll have to implement your own insert and search cores. - This will avoid us to use callbacks and to drop drammatically performances. - I know it's not the cleaner way, but in C (not in C++) to get - performances and genericity... - - See Documentation/rbtree.txt for documentation and samples. 
-*/ - -#ifndef __TOOLS_LINUX_PERF_RBTREE_H -#define __TOOLS_LINUX_PERF_RBTREE_H - -#include -#include - -struct rb_node { - unsigned long __rb_parent_color; - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root { - struct rb_node *rb_node; -}; - - -#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) - -#define RB_ROOT (struct rb_root) { NULL, } -#define rb_entry(ptr, type, member) container_of(ptr, type, member) - -#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) - -/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ -#define RB_EMPTY_NODE(node) \ - ((node)->__rb_parent_color == (unsigned long)(node)) -#define RB_CLEAR_NODE(node) \ - ((node)->__rb_parent_color = (unsigned long)(node)) - - -extern void rb_insert_color(struct rb_node *, struct rb_root *); -extern void rb_erase(struct rb_node *, struct rb_root *); - - -/* Find logical next and previous nodes in a tree */ -extern struct rb_node *rb_next(const struct rb_node *); -extern struct rb_node *rb_prev(const struct rb_node *); -extern struct rb_node *rb_first(const struct rb_root *); -extern struct rb_node *rb_last(const struct rb_root *); - -/* Postorder iteration - always visit the parent after its children */ -extern struct rb_node *rb_first_postorder(const struct rb_root *); -extern struct rb_node *rb_next_postorder(const struct rb_node *); - -/* Fast replacement of a single node without remove/rebalance/add/rebalance */ -extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, - struct rb_root *root); - -static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, - struct rb_node **rb_link) -{ - node->__rb_parent_color = (unsigned long)parent; - node->rb_left = node->rb_right = NULL; - - *rb_link = node; -} - -#define rb_entry_safe(ptr, type, member) \ - ({ typeof(ptr) ____ptr = (ptr); \ - ____ptr ? rb_entry(____ptr, type, member) : NULL; \ - }) - - -/* - * Handy for checking that we are not deleting an entry that is - * already in a list, found in block/{blk-throttle,cfq-iosched}.c, - * probably should be moved to lib/rbtree.c... - */ -static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) -{ - rb_erase(n, root); - RB_CLEAR_NODE(n); -} -#endif /* __TOOLS_LINUX_PERF_RBTREE_H */ -- cgit v0.10.2 From 307bc971959aaa2df44032e7f6b0bda1f7e26890 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 5 Jul 2015 22:59:05 -0300 Subject: tools: Copy rbtree_augmented.h from the kernel To complete the transitioning to not to share the same files with the kernel, also moving it from tools/perf/include/linux/ to tools/include/linux to make the whoke rbtree kit to other tools/ living codebases. 
Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-5bxyehixafckqm6ez25alnfo@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h new file mode 100644 index 0000000..43be941 --- /dev/null +++ b/tools/include/linux/rbtree_augmented.h @@ -0,0 +1,245 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + tools/linux/include/linux/rbtree_augmented.h + + Copied from: + linux/include/linux/rbtree_augmented.h +*/ + +#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H +#define _TOOLS_LINUX_RBTREE_AUGMENTED_H + +#include +#include + +/* + * Please note - only struct rb_augment_callbacks and the prototypes for + * rb_insert_augmented() and rb_erase_augmented() are intended to be public. + * The rest are implementation details you are not expected to depend on. + * + * See Documentation/rbtree.txt for documentation and samples. + */ + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *node, struct rb_node *stop); + void (*copy)(struct rb_node *old, struct rb_node *new); + void (*rotate)(struct rb_node *old, struct rb_node *new); +}; + +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); +/* + * Fixup the rbtree and update the augmented information when rebalancing. + * + * On insertion, the user must update the augmented information on the path + * leading to the inserted node, then call rb_link_node() as usual and + * rb_augment_inserted() instead of the usual rb_insert_color() call. + * If rb_augment_inserted() rebalances the rbtree, it will callback into + * a user provided function to update the augmented information on the + * affected subtrees. 
+ */ +static inline void +rb_insert_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, root, augment->rotate); +} + +#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ + rbtype, rbaugmented, rbcompute) \ +static inline void \ +rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +{ \ + while (rb != stop) { \ + rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ + rbtype augmented = rbcompute(node); \ + if (node->rbaugmented == augmented) \ + break; \ + node->rbaugmented = augmented; \ + rb = rb_parent(&node->rbfield); \ + } \ +} \ +static inline void \ +rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ +} \ +static void \ +rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ + old->rbaugmented = rbcompute(old); \ +} \ +rbstatic const struct rb_augment_callbacks rbname = { \ + rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ +}; + + +#define RB_RED 0 +#define RB_BLACK 1 + +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; +} + +static inline void rb_set_parent_color(struct rb_node *rb, + struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p | color; +} + +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + parent->rb_left = new; + else + parent->rb_right = new; + } else + root->rb_node = new; +} + +extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if (!tmp) { + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. + */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if (child) { + child->__rb_parent_color = pc; + rebalance = NULL; + } else + rebalance = __rb_is_black(pc) ? 
parent : NULL; + tmp = parent; + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + tmp = parent; + } else { + struct rb_node *successor = child, *child2; + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = successor; + child2 = successor->rb_right; + augment->copy(node, successor); + } else { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + parent->rb_left = child2 = successor->rb_right; + successor->rb_right = child; + rb_set_parent(child, successor); + augment->copy(node, successor); + augment->propagate(parent, successor); + } + + successor->rb_left = tmp = node->rb_left; + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + if (child2) { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } else { + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? parent : NULL; + } + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); + if (rebalance) + __rb_erase_color(rebalance, root, augment->rotate); +} + +#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */ diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 9df2681..09dc0aa 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -46,6 +46,7 @@ tools/include/linux/list.h tools/include/linux/log2.h tools/include/linux/poison.h tools/include/linux/rbtree.h +tools/include/linux/rbtree_augmented.h tools/include/linux/types.h include/asm-generic/bitops/arch_hweight.h include/asm-generic/bitops/const_hweight.h @@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S arch/*/lib/memset*.S include/linux/poison.h include/linux/hw_breakpoint.h -include/linux/rbtree_augmented.h include/uapi/linux/perf_event.h include/uapi/linux/const.h include/uapi/linux/swab.h diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h deleted file mode 100644 index 9d6fcdf..0000000 --- a/tools/perf/util/include/linux/rbtree_augmented.h +++ /dev/null @@ -1,2 +0,0 @@ -#include -#include "../../../../include/linux/rbtree_augmented.h" -- cgit v0.10.2 From a822c83e47d97cdef38c4352e1ef62d9f46cfe98 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Fri, 3 Jul 2015 10:22:42 +0100 Subject: dm thin: allocate the cell_sort_array dynamically Given the pool's cell_sort_array holds 8192 pointers it triggers an order 5 allocation via kmalloc. This order 5 allocation is prone to failure as system memory gets more fragmented over time. Fix this by allocating the cell_sort_array using vmalloc. 
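For scale (assuming 4 KiB pages and 8-byte pointers on a 64-bit build): 8192 pointers * 8 bytes = 64 KiB for the array alone, and with the rest of struct pool wrapped around it the kmalloc() size passes 64 KiB and is rounded up to 128 KiB, i.e. 32 physically contiguous pages, an order-5 request. vmalloc() only needs the pages to be virtually contiguous, so the array no longer depends on finding such a run once memory is fragmented.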
Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c33f61a..8f015d9 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -268,7 +269,7 @@ struct pool { process_mapping_fn process_prepared_mapping; process_mapping_fn process_prepared_discard; - struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; + struct dm_bio_prison_cell **cell_sort_array; }; static enum pool_mode get_pool_mode(struct pool *pool); @@ -2777,6 +2778,7 @@ static void __pool_destroy(struct pool *pool) { __pool_table_remove(pool); + vfree(pool->cell_sort_array); if (dm_pool_metadata_close(pool->pmd) < 0) DMWARN("%s: dm_pool_metadata_close() failed.", __func__); @@ -2889,6 +2891,13 @@ static struct pool *pool_create(struct mapped_device *pool_md, goto bad_mapping_pool; } + pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE); + if (!pool->cell_sort_array) { + *error = "Error allocating cell sort array"; + err_p = ERR_PTR(-ENOMEM); + goto bad_sort_array; + } + pool->ref_count = 1; pool->last_commit_jiffies = jiffies; pool->pool_md = pool_md; @@ -2897,6 +2906,8 @@ static struct pool *pool_create(struct mapped_device *pool_md, return pool; +bad_sort_array: + mempool_destroy(pool->mapping_pool); bad_mapping_pool: dm_deferred_set_destroy(pool->all_io_ds); bad_all_io_ds: -- cgit v0.10.2 From d8ea782b56d9d2c46a47b3231cfd16ecfb538c60 Mon Sep 17 00:00:00 2001 From: Vaidyanathan Srinivasan Date: Mon, 29 Jun 2015 10:47:55 +0530 Subject: powerpc/powernv: Fix vma page prot flags in opal-prd driver opal-prd driver will mmap() firmware code/data area as private mapping to prd user space daemon. Write to this page will trigger COW faults. The new COW pages are normal kernel RAM pages accounted by the kernel and are not special. vma->vm_page_prot value will be used at page fault time for the new COW pages, while pgprot_t value passed in remap_pfn_range() is used for the initial page table entry. Hence: * Do not add _PAGE_SPECIAL in vma, but only for remap_pfn_range() * Also remap_pfn_range() will add the _PAGE_SPECIAL flag using pte_mkspecial() call, hence no need to specify in the driver This fix resolves the page accounting warning shown below: BUG: Bad rss-counter state mm:c0000007d34ac600 idx:1 val:19 The above warning is triggered since _PAGE_SPECIAL was incorrectly being set for the normal kernel COW pages. 
Signed-off-by: Vaidyanathan Srinivasan Acked-by: Jeremy Kerr Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 46cb3fe..4ece8e4 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file) static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) { size_t addr, size; + pgprot_t page_prot; int rc; pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", @@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) if (!opal_prd_range_is_valid(addr, size)) return -EINVAL; - vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file, - vma->vm_pgoff, - size, vma->vm_page_prot)) - | _PAGE_SPECIAL); + page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, vma->vm_page_prot); rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, - vma->vm_page_prot); + page_prot); return rc; } -- cgit v0.10.2 From 0fd972a7d91d6e15393c449492a04d94c0b89351 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 1 May 2015 20:13:42 -0400 Subject: module: relocate module_init from init.h to module.h Modular users will always be users of init functionality, but users of init functionality are not necessarily always modules. Hence any functionality like module_init and module_exit would be more at home in the module.h file. And module.h should explicitly include init.h to make the dependency clear. We've already done all the legwork needed to ensure that this move does not cause any build regressions due to implicit header file include assumptions about where module_init lives. Cc: Rusty Russell Acked-by: Rusty Russell Signed-off-by: Paul Gortmaker diff --git a/include/linux/init.h b/include/linux/init.h index 7c68c36..b449f37 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -282,68 +282,8 @@ void __init parse_early_param(void); void __init parse_early_options(char *cmdline); #endif /* __ASSEMBLY__ */ -/** - * module_init() - driver initialization entry point - * @x: function to be run at kernel boot time or module insertion - * - * module_init() will either be called during do_initcalls() (if - * builtin) or at module insertion time (if a module). There can only - * be one per module. - */ -#define module_init(x) __initcall(x); - -/** - * module_exit() - driver exit entry point - * @x: function to be run when driver is removed - * - * module_exit() will wrap the driver clean-up code - * with cleanup_module() when used with rmmod when - * the driver is a module. If the driver is statically - * compiled into the kernel, module_exit() has no effect. - * There can only be one per module. - */ -#define module_exit(x) __exitcall(x); - #else /* MODULE */ -/* - * In most cases loadable modules do not need custom - * initcall levels. There are still some valid cases where - * a driver may be needed early if built in, and does not - * matter when built as a loadable module. Like bus - * snooping debug drivers. 
- */ -#define early_initcall(fn) module_init(fn) -#define core_initcall(fn) module_init(fn) -#define core_initcall_sync(fn) module_init(fn) -#define postcore_initcall(fn) module_init(fn) -#define postcore_initcall_sync(fn) module_init(fn) -#define arch_initcall(fn) module_init(fn) -#define subsys_initcall(fn) module_init(fn) -#define subsys_initcall_sync(fn) module_init(fn) -#define fs_initcall(fn) module_init(fn) -#define fs_initcall_sync(fn) module_init(fn) -#define rootfs_initcall(fn) module_init(fn) -#define device_initcall(fn) module_init(fn) -#define device_initcall_sync(fn) module_init(fn) -#define late_initcall(fn) module_init(fn) -#define late_initcall_sync(fn) module_init(fn) - -#define console_initcall(fn) module_init(fn) -#define security_initcall(fn) module_init(fn) - -/* Each module must use one module_init(). */ -#define module_init(initfn) \ - static inline initcall_t __inittest(void) \ - { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); - -/* This is only required if you want to be unloadable. */ -#define module_exit(exitfn) \ - static inline exitcall_t __exittest(void) \ - { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); - #define __setup_param(str, unique_id, fn) /* nothing */ #define __setup(str, func) /* nothing */ #endif @@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline); /* Data marked not to be saved by software suspend */ #define __nosavedata __section(.data..nosave) -/* This means "can be init if no module support, otherwise module load - may call it." */ -#ifdef CONFIG_MODULES -#define __init_or_module -#define __initdata_or_module -#define __initconst_or_module -#define __INIT_OR_MODULE .text -#define __INITDATA_OR_MODULE .data -#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits -#else -#define __init_or_module __init -#define __initdata_or_module __initdata -#define __initconst_or_module __initconst -#define __INIT_OR_MODULE __INIT -#define __INITDATA_OR_MODULE __INITDATA -#define __INITRODATA_OR_MODULE __INITRODATA -#endif /*CONFIG_MODULES*/ - #ifdef MODULE #define __exit_p(x) x #else diff --git a/include/linux/module.h b/include/linux/module.h index d67b193..3a19c79 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -71,6 +72,89 @@ extern struct module_attribute module_uevent; extern int init_module(void); extern void cleanup_module(void); +#ifndef MODULE +/** + * module_init() - driver initialization entry point + * @x: function to be run at kernel boot time or module insertion + * + * module_init() will either be called during do_initcalls() (if + * builtin) or at module insertion time (if a module). There can only + * be one per module. + */ +#define module_init(x) __initcall(x); + +/** + * module_exit() - driver exit entry point + * @x: function to be run when driver is removed + * + * module_exit() will wrap the driver clean-up code + * with cleanup_module() when used with rmmod when + * the driver is a module. If the driver is statically + * compiled into the kernel, module_exit() has no effect. + * There can only be one per module. + */ +#define module_exit(x) __exitcall(x); + +#else /* MODULE */ + +/* + * In most cases loadable modules do not need custom + * initcall levels. There are still some valid cases where + * a driver may be needed early if built in, and does not + * matter when built as a loadable module. Like bus + * snooping debug drivers. 
+ */ +#define early_initcall(fn) module_init(fn) +#define core_initcall(fn) module_init(fn) +#define core_initcall_sync(fn) module_init(fn) +#define postcore_initcall(fn) module_init(fn) +#define postcore_initcall_sync(fn) module_init(fn) +#define arch_initcall(fn) module_init(fn) +#define subsys_initcall(fn) module_init(fn) +#define subsys_initcall_sync(fn) module_init(fn) +#define fs_initcall(fn) module_init(fn) +#define fs_initcall_sync(fn) module_init(fn) +#define rootfs_initcall(fn) module_init(fn) +#define device_initcall(fn) module_init(fn) +#define device_initcall_sync(fn) module_init(fn) +#define late_initcall(fn) module_init(fn) +#define late_initcall_sync(fn) module_init(fn) + +#define console_initcall(fn) module_init(fn) +#define security_initcall(fn) module_init(fn) + +/* Each module must use one module_init(). */ +#define module_init(initfn) \ + static inline initcall_t __inittest(void) \ + { return initfn; } \ + int init_module(void) __attribute__((alias(#initfn))); + +/* This is only required if you want to be unloadable. */ +#define module_exit(exitfn) \ + static inline exitcall_t __exittest(void) \ + { return exitfn; } \ + void cleanup_module(void) __attribute__((alias(#exitfn))); + +#endif + +/* This means "can be init if no module support, otherwise module load + may call it." */ +#ifdef CONFIG_MODULES +#define __init_or_module +#define __initdata_or_module +#define __initconst_or_module +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits +#else +#define __init_or_module __init +#define __initdata_or_module __initdata +#define __initconst_or_module __initconst +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#define __INITRODATA_OR_MODULE __INITRODATA +#endif /*CONFIG_MODULES*/ + /* Archs provide a method of finding the correct exception table. */ struct exception_table_entry; -- cgit v0.10.2 From 14a0abfc4ab0b49c7e48eca5c6b31288ea457dee Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Fri, 26 Jun 2015 12:42:53 +0530 Subject: ARC: Kconfig: better way to disable ARC_HAS_LLSC for ARC_CPU_750D Signed-off-by: Vineet Gupta diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index e7cee0a..91cf405 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -115,6 +115,7 @@ if ISA_ARCOMPACT config ARC_CPU_750D bool "ARC750D" + select ARC_CANT_LLSC help Support for ARC750 core @@ -362,7 +363,7 @@ config ARC_CANT_LLSC config ARC_HAS_LLSC bool "Insn: LLOCK/SCOND (efficient atomic ops)" default y - depends on !ARC_CPU_750D && !ARC_CANT_LLSC + depends on !ARC_CANT_LLSC config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" -- cgit v0.10.2 From b607eddd7122595bbcf49c00192faf3e49b142d3 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Mon, 29 Jun 2015 15:24:37 +0300 Subject: ARCv2: guard SLC DMA ops with spinlock SLC maintenance ops need to be serialized by software as there is no inherent buffering / quequing of aux commands. It can silently ignore a new aux operation if previous one is still ongoing (SLC_CTRL_BUSY) So gaurd the SLC op using a spin lock The spin lock doesn't seem to be contended even in heavy workloads such as iperf. On FPGA @ 75 MHz. 
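A minimal sketch of the locking pattern being introduced — a function-local static spinlock serializing the busy-polled aux sequence, replacing local_irq_save() which only shields the local core (kernel context assumed; the iperf measurements and the actual cache.c hunk follow below):

#include <linux/spinlock.h>

noinline void slc_op_sketch(unsigned long paddr, unsigned long sz, const int op)
{
	static DEFINE_SPINLOCK(lock);	/* one lock shared by all cores */
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);	/* was: local_irq_save(flags) */

	/* ... program the ARC_REG_SLC_* region registers for paddr/sz ... */

	/* poll for completion; an overlapping op could be silently dropped */
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
}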
[1] Before this change: ============================================================ # iperf -c 10.42.0.1 ------------------------------------------------------------ Client connecting to 10.42.0.1, TCP port 5001 TCP window size: 43.8 KByte (default) ------------------------------------------------------------ [ 3] local 10.42.0.110 port 38935 connected with 10.42.0.1 port 5001 [ ID] Interval Transfer Bandwidth [ 3] 0.0-10.0 sec 48.4 MBytes 40.6 Mbits/sec ============================================================ [2] After this change: ============================================================ # iperf -c 10.42.0.1 ------------------------------------------------------------ Client connecting to 10.42.0.1, TCP port 5001 TCP window size: 43.8 KByte (default) ------------------------------------------------------------ [ 3] local 10.42.0.243 port 60248 connected with 10.42.0.1 port 5001 [ ID] Interval Transfer Bandwidth [ 3] 0.0-10.0 sec 47.5 MBytes 39.8 Mbits/sec # iperf -c 10.42.0.1 ------------------------------------------------------------ Client connecting to 10.42.0.1, TCP port 5001 TCP window size: 43.8 KByte (default) ------------------------------------------------------------ [ 3] local 10.42.0.243 port 60249 connected with 10.42.0.1 port 5001 [ ID] Interval Transfer Bandwidth [ 3] 0.0-10.0 sec 54.9 MBytes 46.0 Mbits/sec ============================================================ Signed-off-by: Alexey Brodkin Cc: arc-linux-dev@synopsys.com Signed-off-by: Vineet Gupta diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index b29d62e..1cd6695 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) { #ifdef CONFIG_ISA_ARCV2 + /* + * SLC is shared between all cores and concurrent aux operations from + * multiple cores need to be serialized using a spinlock + * A concurrent operation can be silently ignored and/or the old/new + * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop + * below) + */ + static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned int ctrl; - local_irq_save(flags); + spin_lock_irqsave(&lock, flags); /* * The Region Flush operation is specified by CTRL.RGN_OP[11..9] @@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); - local_irq_restore(flags); + spin_unlock_irqrestore(&lock, flags); #endif } -- cgit v0.10.2 From 61754c18752ffb78145671e94f053fb202fff041 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Wed, 1 Jul 2015 17:19:30 +0200 Subject: kbuild: Allow arch Makefiles to override {cpp,ld,c}flags Since commit a1c48bb1 (Makefile: Fix unrecognized cross-compiler command line options), the arch Makefile is included earlier by the main Makefile, preventing the arc architecture to set its -O3 compiler option. Since there might be more use cases for an arch Makefile to fine-tune the options, add support for ARCH_CPPFLAGS, ARCH_AFLAGS and ARCH_CFLAGS variables that are appended to the respective kbuild variables. The user still has the final say via the KCPPFLAGS, KAFLAGS and KCFLAGS variables. 
Reported-by: Vineet Gupta Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Michal Marek diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index e63b446..13f888a 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly): $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic mode) if this option is supported by $(AR). + ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults + + These variables are appended to the KBUILD_CPPFLAGS, + KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the + top-level Makefile has set any other flags. This provides a + means for an architecture to override the defaults. + + --- 6.2 Add prerequisites to archheaders: The archheaders: rule is used to generate header files that diff --git a/Makefile b/Makefile index 13270c0..8c31fbc 100644 --- a/Makefile +++ b/Makefile @@ -780,10 +780,11 @@ endif include scripts/Makefile.kasan include scripts/Makefile.extrawarn -# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments -KBUILD_CPPFLAGS += $(KCPPFLAGS) -KBUILD_AFLAGS += $(KAFLAGS) -KBUILD_CFLAGS += $(KCFLAGS) +# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the +# last assignments +KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS) +KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS) +KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS) # Use --build-id when available. LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\ -- cgit v0.10.2 From 97709069214eb75312c14946803b9da4d3814203 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 18 Jun 2015 13:54:01 +0530 Subject: ARC: Override toplevel default -O2 with -O3 ARC kernels have historically been built with -O3, despite top level Makefile defaulting to -O2. This was facilitated by implicitly ordering of arch makefile include AFTER top level assigned -O2. An upstream fix to top level a1c48bb160f ("Makefile: Fix unrecognized cross-compiler command line options") changed the ordering, making ARC -O3 defunct. Fix that by NOT relying on any ordering whatsoever and use the proper arch override facility now present in kbuild (ARCH_*FLAGS) Depends-on: ("kbuild: Allow arch Makefiles to override {cpp,ld,c}flags") Suggested-by: Michal Marek Cc: Geert Uytterhoeven Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Vineet Gupta diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 6107062..46d8731 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -49,7 +49,8 @@ endif ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE # Generic build system uses -O2, we want -O3 -cflags-y += -O3 +# Note: No need to add to cflags-y as that happens anyways +ARCH_CFLAGS += -O3 endif # small data is default for elf32 tool-chain. If not usable, disable it -- cgit v0.10.2 From f718c2efff0b0460e5335607a1c6caf620847680 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Fri, 3 Jul 2015 10:40:43 +0530 Subject: ARC: Don't memzero twice in dma_alloc_coherent for __GFP_ZERO alloc_pages_exact() get gfp flags and handle zero'ing already And while it, fix the case where ioremap fails: return rightaway. 
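A minimal sketch of the resulting allocation path — when the caller passes __GFP_ZERO, alloc_pages_exact() already hands back zeroed pages, so no second memset() is needed, and ioremap failure returns immediately (kernel context assumed; error-path cleanup elided, as in the hunk below):

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

static void *dma_alloc_sketch(size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	void *paddr, *kvaddr;

	/* gfp may carry __GFP_ZERO; alloc_pages_exact() honours it */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL)	/* return right away instead of falling through */
		return NULL;

	*dma_handle = (dma_addr_t)paddr;	/* bus address, platform dependent */
	return kvaddr;
}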
Signed-off-by: Vineet Gupta diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 74a637a..57706a9 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, /* This is kernel Virtual address (0x7000_0000 based) */ kvaddr = ioremap_nocache((unsigned long)paddr, size); - if (kvaddr != NULL) - memset(kvaddr, 0, size); + if (kvaddr == NULL) + return NULL; /* This is bus address, platform dependent */ *dma_handle = (dma_addr_t)paddr; -- cgit v0.10.2 From bccea41ec02301a439a26e43fa49521ae8da4352 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 6 Jul 2015 10:25:01 +0530 Subject: ARC: fix unused var wanring Fixes: 9bf39ab2adaf ("vfs: add file_path() helper") Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 807f7d6..a6f91e8 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs) static void print_task_path_n_nm(struct task_struct *tsk, char *buf) { - struct path path; char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; -- cgit v0.10.2 From 83ce3e6fcc16b4d36d40765618777b5a6a30d75b Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 30 Jun 2015 13:37:28 +0530 Subject: ARCv2: intc: IDU: support irq affinity With this nsim standlone / OSCI have working irq affinity - AXS103 still needs some work as IDU is not visible in intc hierarchy yet ! Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 30284e8..dfeea22 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -218,11 +218,28 @@ static void idu_irq_unmask(struct irq_data *data) raw_spin_unlock_irqrestore(&mcip_lock, flags); } +#ifdef CONFIG_SMP static int -idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f) +idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, + bool force) { + unsigned long flags; + cpumask_t online; + + /* errout if no online cpu per @cpumask */ + if (!cpumask_and(&online, cpumask, cpu_online_mask)) + return -EINVAL; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + idu_set_dest(data->hwirq, cpumask_bits(&online)[0]); + idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); + return IRQ_SET_MASK_OK; } +#endif static struct irq_chip idu_irq_chip = { .name = "MCIP IDU Intc", -- cgit v0.10.2 From 6b12ec177c410ef984d2b97717df77c9269eaeac Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 2 Jul 2015 14:02:54 +0530 Subject: ARCv2: intc: IDU: Fix potential race in installing a chained IRQ handler Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index dfeea22..6fb0a2f 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -347,8 +347,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent) if (!i) idu_first_irq = irq; - irq_set_handler_data(irq, domain); - irq_set_chained_handler(irq, idu_cascade_isr); + irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain); } __mcip_cmd(CMD_IDU_ENABLE, 0); -- cgit v0.10.2 From 5aac644a9944bea93b4f05ced1883a902a2535f6 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 3 Jun 2015 10:39:46 +0300 Subject: x86/tsc: Let high latency PIT fail fast in quick_pit_calibrate() If it takes longer than 12us to read the PIT counter lsb/msb, then the error margin will never fall below 500ppm within 50ms, and Fast TSC 
calibration will always fail. This patch detects when that will happen and fails fast. Note the failure message is not printed in that case because: 1. it will always happen on that class of hardware 2. the absence of the message is more informative than its presence Signed-off-by: Adrian Hunter Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Len Brown Cc: Andi Kleen Link: http://lkml.kernel.org/r/556EB717.9070607@intel.com Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 5054497..7437b41 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void) if (!pit_expect_msb(0xff-i, &delta, &d2)) break; + delta -= tsc; + + /* + * Extrapolate the error and fail fast if the error will + * never be below 500 ppm. + */ + if (i == 1 && + d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11) + return 0; + /* * Iterate until the error is less than 500 ppm */ - delta -= tsc; if (d1+d2 >= delta >> 11) continue; -- cgit v0.10.2 From acb33cc541d7a5495b16a133702d4c401ea4e294 Mon Sep 17 00:00:00 2001 From: "Vutla, Lokesh" Date: Thu, 2 Jul 2015 18:33:28 +0530 Subject: crypto: omap-des - Fix unmapping of dma channels dma_unmap_sg() is being called twice after completing the task. Looks like this is a copy paste error when creating des driver. With this the following warn appears during boot: [ 4.210457] ------------[ cut here ]------------ [ 4.215114] WARNING: CPU: 0 PID: 0 at lib/dma-debug.c:1080 check_unmap+0x710/0x9a0() [ 4.222899] omap-des 480a5000.des: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x00000000ab2ce000] [size=8 bytes] [ 4.236785] Modules linked in: [ 4.239860] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.14.39-02999-g1bc045a-dirty #182 [ 4.247918] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 4.255710] [] (show_stack) from [] (dump_stack+0x84/0xb8) [ 4.262977] [] (dump_stack) from [] (warn_slowpath_common+0x68/0x8c) [ 4.271107] [] (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40) [ 4.279854] [] (warn_slowpath_fmt) from [] (check_unmap+0x710/0x9a0) [ 4.287991] [] (check_unmap) from [] (debug_dma_unmap_sg+0x90/0x19c) [ 4.296128] [] (debug_dma_unmap_sg) from [] (omap_des_done_task+0x1cc/0x3e4) [ 4.304963] [] (omap_des_done_task) from [] (tasklet_action+0x84/0x124) [ 4.313370] [] (tasklet_action) from [] (__do_softirq+0xf0/0x20c) [ 4.321235] [] (__do_softirq) from [] (irq_exit+0x98/0xec) [ 4.328500] [] (irq_exit) from [] (handle_IRQ+0x50/0xb0) [ 4.335589] [] (handle_IRQ) from [] (gic_handle_irq+0x28/0x5c) Removing the duplicate call to dma_unmap_sg(). Cc: stable@vger.kernel.org Reported-by: Tomi Valkeinen Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 4630709..0a70e46 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); - return err; } -- cgit v0.10.2 From e828b23734bfebe30e95964302bbb801d02fddf6 Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Mon, 6 Jul 2015 10:44:26 +0800 Subject: ALSA: hda - add codec ID for Broxton display audio codec This patch adds codec ID (0x8086280a) and module alias for Broxton display codec. 
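For context, the shape of such an addition is a one-line preset-table entry plus a matching module alias so the codec driver can be autoloaded; a hypothetical, excerpt-style sketch (the exact lines are in the patch_hdmi.c hunk below):

static const struct hda_codec_preset my_hdmi_presets[] = {
	/* maps the codec vendor/device ID to the generic HDMI patch */
	{ .id = 0x8086280a, .name = "Broxton HDMI", .patch = patch_generic_hdmi },
	{}	/* terminator */
};
MODULE_ALIAS("snd-hda-codec-id:8086280a");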
Signed-off-by: Libin Yang Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2f24338..9515891 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3527,6 +3527,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi }, { .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, { .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi }, +{ .id = 0x8086280a, .name = "Broxton HDMI", .patch = patch_generic_hdmi }, { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, { .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, @@ -3591,6 +3592,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862806"); MODULE_ALIAS("snd-hda-codec-id:80862807"); MODULE_ALIAS("snd-hda-codec-id:80862808"); MODULE_ALIAS("snd-hda-codec-id:80862809"); +MODULE_ALIAS("snd-hda-codec-id:8086280a"); MODULE_ALIAS("snd-hda-codec-id:80862880"); MODULE_ALIAS("snd-hda-codec-id:80862882"); MODULE_ALIAS("snd-hda-codec-id:80862883"); -- cgit v0.10.2 From 0d7b6b1182ef6f72be592688c8a22025a5b7b483 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 2 Jul 2015 14:29:58 +0300 Subject: drm/i915/chv: fix HW readout of the port PLL fractional divider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ville noticed that the PLL HW readout code parsed the fractional divider value as if the fractional divider was always enabled. This may result in a port clock state check mismatch if the preceeding modeset disabled the fractional divider, but left a non-zero divider value in the register. Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1b61f98..8aa8038 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7887,7 +7887,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); intel_clock_t clock; - u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2; + u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; mutex_lock(&dev_priv->sb_lock); @@ -7895,10 +7895,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); + pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); mutex_unlock(&dev_priv->sb_lock); clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; - clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff); + clock.m2 = (pll_dw0 & 0xff) << 22; + if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) + clock.m2 |= pll_dw2 & 0x3fffff; clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; -- cgit v0.10.2 From f6d7fb37f92622479ef6da604f27561f5045ba1e Mon Sep 17 00:00:00 2001 From: Claudio Cappelli Date: Wed, 10 Jun 2015 20:38:30 +0200 Subject: USB: option: add 2020:4000 ID Add device Olivetti Olicard 300 (Network Connect: MT6225) - IDs 2020:4000. 
T: Bus=01 Lev=02 Prnt=04 Port=00 Cnt=01 Dev#= 10 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=2020 ProdID=4000 Rev=03.00 S: Manufacturer=Network Connect S: Product=MT6225 C: #Ifs= 7 Cfg#= 1 Atr=a0 MxPwr=500mA I: If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim I: If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim I: If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=02 Prot=01 Driver=option I: If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option I: If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option I: If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option I: If#= 6 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=usb-storage Signed-off-by: Claudio Cappelli Suggested-by: Lars Melin Cc: stable [johan: amend commit message with devices info ] Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f0c0c53..19b85ee 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ -- cgit v0.10.2 From ee5729ece70b2dacd8fc6ed928f9f81ced9f380e Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 23 Jun 2015 18:59:40 +0530 Subject: USB: mos7720: rename registers Some of the register names defined here are matching with registers defined in other places. Like DCR is defined here and DCR is also a register in mn10300 architecture. So when we are building this with mn10300, build fails. To avoid we rename all the registers. 
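A minimal sketch of the rename: keep the enum, but give every enumerator a driver-specific prefix so it can no longer collide with arch-level macros such as mn10300's DCR (the full rename is in the mos7720.c hunk below):

enum mos_regs_sketch {
	MOS7720_THR,	/* was THR */
	MOS7720_RHR,	/* was RHR */
	/* ... */
	MOS7720_DCR,	/* was DCR -- the name that clashed on mn10300 */
	/* ... remaining serial, parallel and control registers,
	 *     all prefixed the same way ... */
};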
Signed-off-by: Sudip Mukherjee Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 4f70df3..78b4f64 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock); static const unsigned int dummy; /* for clarity in register access fns */ enum mos_regs { - THR, /* serial port regs */ - RHR, - IER, - FCR, - ISR, - LCR, - MCR, - LSR, - MSR, - SPR, - DLL, - DLM, - DPR, /* parallel port regs */ - DSR, - DCR, - ECR, - SP1_REG, /* device control regs */ - SP2_REG, /* serial port 2 (7720 only) */ - PP_REG, - SP_CONTROL_REG, + MOS7720_THR, /* serial port regs */ + MOS7720_RHR, + MOS7720_IER, + MOS7720_FCR, + MOS7720_ISR, + MOS7720_LCR, + MOS7720_MCR, + MOS7720_LSR, + MOS7720_MSR, + MOS7720_SPR, + MOS7720_DLL, + MOS7720_DLM, + MOS7720_DPR, /* parallel port regs */ + MOS7720_DSR, + MOS7720_DCR, + MOS7720_ECR, + MOS7720_SP1_REG, /* device control regs */ + MOS7720_SP2_REG, /* serial port 2 (7720 only) */ + MOS7720_PP_REG, + MOS7720_SP_CONTROL_REG, }; /* @@ -150,26 +150,26 @@ enum mos_regs { static inline __u16 get_reg_index(enum mos_regs reg) { static const __u16 mos7715_index_lookup_table[] = { - 0x00, /* THR */ - 0x00, /* RHR */ - 0x01, /* IER */ - 0x02, /* FCR */ - 0x02, /* ISR */ - 0x03, /* LCR */ - 0x04, /* MCR */ - 0x05, /* LSR */ - 0x06, /* MSR */ - 0x07, /* SPR */ - 0x00, /* DLL */ - 0x01, /* DLM */ - 0x00, /* DPR */ - 0x01, /* DSR */ - 0x02, /* DCR */ - 0x0a, /* ECR */ - 0x01, /* SP1_REG */ - 0x02, /* SP2_REG (7720 only) */ - 0x04, /* PP_REG (7715 only) */ - 0x08, /* SP_CONTROL_REG */ + 0x00, /* MOS7720_THR */ + 0x00, /* MOS7720_RHR */ + 0x01, /* MOS7720_IER */ + 0x02, /* MOS7720_FCR */ + 0x02, /* MOS7720_ISR */ + 0x03, /* MOS7720_LCR */ + 0x04, /* MOS7720_MCR */ + 0x05, /* MOS7720_LSR */ + 0x06, /* MOS7720_MSR */ + 0x07, /* MOS7720_SPR */ + 0x00, /* MOS7720_DLL */ + 0x01, /* MOS7720_DLM */ + 0x00, /* MOS7720_DPR */ + 0x01, /* MOS7720_DSR */ + 0x02, /* MOS7720_DCR */ + 0x0a, /* MOS7720_ECR */ + 0x01, /* MOS7720_SP1_REG */ + 0x02, /* MOS7720_SP2_REG (7720 only) */ + 0x04, /* MOS7720_PP_REG (7715 only) */ + 0x08, /* MOS7720_SP_CONTROL_REG */ }; return mos7715_index_lookup_table[reg]; } @@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg) static inline __u16 get_reg_value(enum mos_regs reg, unsigned int serial_portnum) { - if (reg >= SP1_REG) /* control reg */ + if (reg >= MOS7720_SP1_REG) /* control reg */ return 0x0000; - else if (reg >= DPR) /* parallel port reg (7715 only) */ + else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */ return 0x0100; else /* serial port reg */ @@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport, enum mos7715_pp_modes mode) { mos_parport->shadowECR = mode; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); return 0; } @@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; mos7715_change_mode(mos_parport, SPP); - write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d); parport_epilogue(pp); } @@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp) if (parport_prologue(pp) < 0) return 0; - read_mos_reg(mos_parport->serial, dummy, DPR, &d); + read_mos_reg(mos_parport->serial, dummy, 
MOS7720_DPR, &d); parport_epilogue(pp); return d; } @@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0); - write_mos_reg(mos_parport->serial, dummy, DCR, data); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data); mos_parport->shadowDCR = data; parport_epilogue(pp); } @@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp, if (parport_prologue(pp) < 0) return 0; mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); dcr = mos_parport->shadowDCR & 0x0f; parport_epilogue(pp); return dcr; @@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); mos_parport->shadowDCR &= ~0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); mos_parport->shadowDCR |= 0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp, spin_unlock(&release_lock); return; } - write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR); - write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR); + write_parport_reg_nonblock(mos_parport, MOS7720_DCR, + mos_parport->shadowDCR); + write_parport_reg_nonblock(mos_parport, MOS7720_ECR, + mos_parport->shadowECR); spin_unlock(&release_lock); } @@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial) init_completion(&mos_parport->syncmsg_compl); /* cycle parallel port reset bit */ - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80); - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00); /* initialize device registers */ mos_parport->shadowDCR = DCR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); mos_parport->shadowECR = ECR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); /* register with parport core */ mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, @@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) /* Initialize MCS7720 -- Write Init values to corresponding Registers * * Register Index - * 0 : THR/RHR - * 1 : IER - * 2 : FCR - * 3 : LCR - * 4 : MCR - * 5 : LSR - * 6 : MSR - * 7 : SPR + * 0 : MOS7720_THR/MOS7720_RHR + * 1 : MOS7720_IER + * 2 : MOS7720_FCR + * 3 : MOS7720_LCR + * 4 : MOS7720_MCR + * 5 : MOS7720_LSR + * 6 : MOS7720_MSR + * 7 : MOS7720_SPR * * 0x08 : SP1/2 Control Reg */ port_number = port->port_number; - read_mos_reg(serial, port_number, LSR, &data); + read_mos_reg(serial, 
port_number, MOS7720_LSR, &data); dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data); - write_mos_reg(serial, dummy, SP1_REG, 0x02); - write_mos_reg(serial, dummy, SP2_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02); - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); - write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00); - read_mos_reg(serial, dummy, SP_CONTROL_REG, &data); + write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00); + read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data); data = data | (port->port_number + 1); - write_mos_reg(serial, dummy, SP_CONTROL_REG, data); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data); mos7720_port->shadowLCR = 0x83; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, THR, 0x0c); - write_mos_reg(serial, port_number, IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_THR, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); response = usb_submit_urb(port->read_urb, GFP_KERNEL); if (response) @@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port) usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); - write_mos_reg(serial, port->port_number, MCR, 0x00); - write_mos_reg(serial, port->port_number, IER, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00); mos7720_port->open = 0; } @@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state) data = mos7720_port->shadowLCR & ~UART_LCR_SBC; mos7720_port->shadowLCR = data; - write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port->port_number, MOS7720_LCR, + mos7720_port->shadowLCR); } /* @@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR &= ~UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); } } @@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR |= UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, 
mos7720_port->shadowMCR); } } @@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port, dev_dbg(&port->dev, "Sending Setting Commands ..........\n"); port_number = port->port_number; - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00); /*********************************************** * Set for higher rates * ***********************************************/ /* writing baud rate verbatum into uart clock field clearly not right */ if (port_number == 0) - sp_reg = SP1_REG; + sp_reg = MOS7720_SP1_REG; else - sp_reg = SP2_REG; + sp_reg = MOS7720_SP2_REG; write_mos_reg(serial, dummy, sp_reg, baud * 0x10); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03); mos7720_port->shadowMCR = 0x2b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /*********************************************** * Set DLL/DLM ***********************************************/ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, DLL, 0x01); - write_mos_reg(serial, port_number, DLM, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_DLL, 0x01); + write_mos_reg(serial, port_number, MOS7720_DLM, 0x00); mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); return 0; } @@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port, /* Enable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); /* Write the divisor */ - write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff)); - write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8)); + write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff)); + write_mos_reg(serial, number, MOS7720_DLM, + (__u8)((divisor & 0xff00) >> 8)); /* Disable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); return status; } @@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty, /* Disable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, 
MOS7720_FCR, 0xcf); /* Send the updated LCR value to the mos7720 */ - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* set up the MCR register and send it to the mos7720 */ mos7720_port->shadowMCR = UART_MCR_OUT2; @@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty, /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ if (port_number) - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x01); else - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x02); } else mos7720_port->shadowMCR &= ~(UART_MCR_XONANY); - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); @@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty, if (baud >= 230400) { set_higher_rates(mos7720_port, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); return; } @@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty, if (cflag & CBAUD) tty_encode_baud_rate(tty, baud, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); if (port->read_urb->status != -EINPROGRESS) { status = usb_submit_urb(port->read_urb, GFP_KERNEL); @@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty, count = mos7720_chars_in_buffer(tty); if (count == 0) { - read_mos_reg(port->serial, port_number, LSR, &data); + read_mos_reg(port->serial, port_number, MOS7720_LSR, &data); if ((data & (UART_LSR_TEMT | UART_LSR_THRE)) == (UART_LSR_TEMT | UART_LSR_THRE)) { dev_dbg(&port->dev, "%s -- Empty\n", __func__); @@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty, mcr &= ~UART_MCR_LOOP; mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd, } mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial) } #endif /* LSR For Port 1 */ - read_mos_reg(serial, 0, LSR, &data); + read_mos_reg(serial, 0, MOS7720_LSR, &data); dev_dbg(&dev->dev, "LSR:%x\n", data); return 0; -- cgit v0.10.2 From 14f21189df33bc972455d6a0ed875aa68718d7fc Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Mon, 29 Jun 2015 16:05:11 +0530 Subject: cxl/vphb.c: Use phb pointer after NULL check static Anlaysis detected below error:- (error) Possible null pointer dereference: phb So, Use phb after NULL check. 
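The fix is the classic ordering rule: test a pointer before dereferencing it. A minimal sketch of the corrected flow (kernel context assumed; the actual change is the vphb.c hunk below):

static int cfg_info_sketch(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	struct cxl_afu *afu;

	if (phb == NULL)		/* check first ... */
		return PCIBIOS_DEVICE_NOT_FOUND;

	afu = (struct cxl_afu *)phb->private_data;	/* ... dereference after */

	/* ... use afu ... */
	return 0;
}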
Signed-off-by: Maninder Singh Acked-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index b1d1983a..2eba002 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, unsigned long addr; phb = pci_bus_to_host(bus); - afu = (struct cxl_afu *)phb->private_data; if (phb == NULL) return PCIBIOS_DEVICE_NOT_FOUND; + afu = (struct cxl_afu *)phb->private_data; + if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num) return PCIBIOS_DEVICE_NOT_FOUND; if (offset >= (unsigned long)phb->cfg_data) -- cgit v0.10.2 From 27ea2c420cad57386c25668cb9a9f0f082635586 Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Mon, 15 Jun 2015 13:25:19 +1000 Subject: powerpc: Set the correct kernel taint on machine check errors. This means the 'M' flag will work properly when the kernel prints a backtrace. Signed-off-by: Daniel Axtens Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6530f1b..37de90f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs) __this_cpu_inc(irq_stat.mce_exceptions); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) handled = cur_cpu_spec->machine_check_early(regs); return handled; -- cgit v0.10.2 From 8c00d5c9d3e7452658dda55024485d52919ebfdd Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Thu, 2 Jul 2015 15:55:21 +1000 Subject: cxl: Test the correct mmio space before unmapping Before freeing p2n, test p2n, not p1n. Signed-off-by: Daniel Axtens Acked-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index c68ef58..32ad097 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -539,7 +539,7 @@ err: static void cxl_unmap_slice_regs(struct cxl_afu *afu) { - if (afu->p1n_mmio) + if (afu->p2n_mmio) iounmap(afu->p2n_mmio); if (afu->p1n_mmio) iounmap(afu->p1n_mmio); -- cgit v0.10.2 From eab861a7a52208868637f47a92caa926ddadd9c7 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 2 Jul 2015 14:56:20 +1000 Subject: powerpc: Add plain English description for alignment exception oopses If we take an alignment exception which we cannot fix, the oops currently prints: Unable to handle kernel paging request for unknown fault Lets print something more useful: Unable to handle kernel paging request for unaligned access at address 0xc0000000f77bba8f Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 6d53597..a67c6d7 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) printk(KERN_ALERT "Unable to handle kernel paging request for " "instruction fetch\n"); break; + case 0x600: + printk(KERN_ALERT "Unable to handle kernel paging request for " + "unaligned access at address 0x%08lx\n", regs->dar); + break; default: printk(KERN_ALERT "Unable to handle kernel paging request for " "unknown fault\n"); -- cgit v0.10.2 From aaf6fd5c75eb4aa734ec2062deab371633c3655f Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Mon, 6 Jul 2015 09:40:34 +1000 Subject: powerpc/ppc4xx_hsta_msi: Include ppc-pci.h to fix reference to hose_list An earlier commit referenced 'hose_list' in 
sysdev/ppc4xx_hsta_msi.c. hose_list is defined in ppc-pci.h, which was not included in that file. Include it, fixing the build for the akebono defconfig used by the kbuild test robot. Fixes: f2c800aaceb6 ("powerpc/ppc4xx_hsta_msi: Move MSI-related ops to pci_controller_ops") Reported-by: kbuild test robot Signed-off-by: Daniel Axtens Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c index 2bc3367..87f9623 100644 --- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c @@ -18,6 +18,7 @@ #include #include #include +#include struct ppc4xx_hsta_msi { struct device *dev; -- cgit v0.10.2 From a8956a7b7232da5f4ce4a305c72a54cc5e4a8307 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 3 Jul 2015 17:39:12 +1000 Subject: powerpc/powernv: Fix opal-elog interrupt handler The conversion of opal events to a proper irqchip means that handlers are called until the relevant opal event has been cleared by processing it. Events that queue work should therefore use a threaded handler to mask the event until processing is complete. Signed-off-by: Alistair Popple Tested-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 4949ef0..37f959b 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) return elog; } -static void elog_work_fn(struct work_struct *work) +static irqreturn_t elog_event(int irq, void *data) { __be64 size; __be64 id; @@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work) rc = opal_get_elog_size(&id, &size, &type); if (rc != OPAL_SUCCESS) { pr_err("ELOG: OPAL log info read failed\n"); - return; + return IRQ_HANDLED; } elog_size = be64_to_cpu(size); @@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work) * entries. */ if (kset_find_obj(elog_kset, name)) - return; + return IRQ_HANDLED; create_elog_obj(log_id, elog_size, elog_type); -} - -static DECLARE_WORK(elog_work, elog_work_fn); -static irqreturn_t elog_event(int irq, void *data) -{ - schedule_work(&elog_work); return IRQ_HANDLED; } @@ -304,8 +298,8 @@ int __init opal_elog_init(void) return irq; } - rc = request_irq(irq, elog_event, - IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL); + rc = request_threaded_irq(irq, NULL, elog_event, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL); if (rc) { pr_err("%s: Can't request OPAL event irq (%d)\n", __func__, rc); -- cgit v0.10.2 From f98a7aa81eeeadcad25665c3501c236d531d4382 Mon Sep 17 00:00:00 2001 From: Peter Sanford Date: Thu, 25 Jun 2015 17:40:05 -0700 Subject: USB: cp210x: add ID for Aruba Networks controllers Add the USB serial console device ID for Aruba Networks 7xxx series controllers which have a USB port for their serial console. 
Signed-off-by: Peter Sanford Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index ffd739e..eac7cca 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ -- cgit v0.10.2 From 57ffc5ca679f499f4704fd9b6a372916f59930ee Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 18 Jun 2015 12:32:49 +0200 Subject: perf: Fix AUX buffer refcounting Its currently possible to drop the last refcount to the aux buffer from NMI context, which results in the expected fireworks. The refcounting needs a bigger overhaul, but to cure the immediate problem, delay the freeing by using an irq_work. Reviewed-and-tested-by: Alexander Shishkin Reported-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20150618103249.GK19282@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/events/core.c b/kernel/events/core.c index e965cfa..d3dae34 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event) rcu_read_unlock(); } -static void rb_free_rcu(struct rcu_head *rcu_head) -{ - struct ring_buffer *rb; - - rb = container_of(rcu_head, struct ring_buffer, rcu_head); - rb_free(rb); -} - struct ring_buffer *ring_buffer_get(struct perf_event *event) { struct ring_buffer *rb; diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 2deb24c..2bbad9c 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -11,6 +11,7 @@ struct ring_buffer { atomic_t refcount; struct rcu_head rcu_head; + struct irq_work irq_work; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; int page_order; /* allocation order */ @@ -55,6 +56,15 @@ struct ring_buffer { }; extern void rb_free(struct ring_buffer *rb); + +static inline void rb_free_rcu(struct rcu_head *rcu_head) +{ + struct ring_buffer *rb; + + rb = container_of(rcu_head, struct ring_buffer, rcu_head); + rb_free(rb); +} + extern struct ring_buffer * rb_alloc(int nr_pages, long watermark, int cpu, int flags); extern void perf_event_wakeup(struct perf_event *event); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 9647282..b2be01b 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } +static void rb_irq_work(struct irq_work *work); + static void ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) { @@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) INIT_LIST_HEAD(&rb->event_list); spin_lock_init(&rb->event_lock); + init_irq_work(&rb->irq_work, rb_irq_work); +} + +static void ring_buffer_put_async(struct ring_buffer *rb) +{ + if (!atomic_dec_and_test(&rb->refcount)) + return; + + 
rb->rcu_head.next = (void *)rb; + irq_work_queue(&rb->irq_work); } /* @@ -319,7 +331,7 @@ err_put: rb_free_aux(rb); err: - ring_buffer_put(rb); + ring_buffer_put_async(rb); handle->event = NULL; return NULL; @@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, local_set(&rb->aux_nest, 0); rb_free_aux(rb); - ring_buffer_put(rb); + ring_buffer_put_async(rb); } /* @@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb) void rb_free_aux(struct ring_buffer *rb) { if (atomic_dec_and_test(&rb->aux_refcount)) + irq_work_queue(&rb->irq_work); +} + +static void rb_irq_work(struct irq_work *work) +{ + struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); + + if (!atomic_read(&rb->aux_refcount)) __rb_free_aux(rb); + + if (rb->rcu_head.next == (void *)rb) + call_rcu(&rb->rcu_head, rb_free_rcu); } #ifndef CONFIG_PERF_USE_VMALLOC -- cgit v0.10.2 From ebf2d2689de551d90965090bb991fc640a0c0d41 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Mon, 22 Jun 2015 21:38:43 +0200 Subject: perf/x86: Fix copy_from_user_nmi() return if range is not ok Commit 0a196848ca36 ("perf: Fix arch_perf_out_copy_user default"), changes copy_from_user_nmi() to return the number of remaining bytes so that it behave like copy_from_user(). Unfortunately, when the range is outside of the process memory, the return value is still the number of byte copied, eg. 0, instead of the remaining bytes. As all users of copy_from_user_nmi() were modified as part of commit 0a196848ca36, the function should be fixed to return the total number of bytes if range is not correct. Signed-off-by: Yann Droneaud Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435001923-30986-1-git-send-email-ydroneaud@opteya.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index ddf9ecb..e342586 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) unsigned long ret; if (__range_not_ok(from, n, TASK_SIZE)) - return 0; + return n; /* * Even though this function is typically called from NMI/IRQ context -- cgit v0.10.2 From 5c250adb51ca318b67ac3a230a565dbe2ea9f0ef Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 11 Jun 2015 16:18:38 +0200 Subject: Revert "ARM: dts: am335x-boneblack: disable RTC-only sleep" This reverts commit 3d76be5b933e2a66d85a2f7444e68e99e8a48ad4. The latest revision of Beaglebone Black does not support RTC-only mode. To avoid potential hardware damage, RTC-only mode was disabled by default by commit 7a6cb0abe1aa ("ARM: dts: am335x-boneblack: disable RTC-only sleep to avoid hardware damage"). Unfortunately, an incorrect fix had already been applied, which instead of just disabling RTC-only mode, prevents the Beaglebone from powering down at all. Revert this patch to fix the power-off regression. 
Signed-off-by: Johan Hovold Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 901739f..5c42d25 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -80,3 +80,7 @@ status = "okay"; }; }; + +&rtc { + system-power-controller; +}; -- cgit v0.10.2 From fff75ee150104fdc29d86a75a5511482a981c27b Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Wed, 6 May 2015 12:25:33 -0500 Subject: ARM: dts: am4372: Add emif node Add node for TI AM4372 EMIF. Without this we get a warning with the recent commit fabbe6df (ARM: OMAP: AM43xx hwmod: Add data for am43xx emif hwmod). Signed-off-by: Dave Gerlach Tested-by: Felipe Balbi Acked-by: Felipe Balbi [tony@atomide.com: updated comments] Signed-off-by: Tony Lindgren diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt index 938f8e1..0db6047 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt +++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt @@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it. Required properties: - compatible : Should be of the form "ti,emif-" where is the IP revision of the specific EMIF instance. + For am437x should be ti,emif-am4372. - phy-type : indicating the DDR phy type. Following are the allowed values diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index c80a3e2..9521a38 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -132,6 +132,12 @@ }; }; + emif: emif@4c000000 { + compatible = "ti,emif-am4372"; + reg = <0x4c000000 0x1000000>; + ti,hwmods = "emif"; + }; + edma: edma@49000000 { compatible = "ti,edma3"; ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; -- cgit v0.10.2 From d49db342f0e276b354383b3281c5668b6b80f5c2 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 24 Jun 2015 12:41:47 -0700 Subject: sched/fair: Test list head instead of list entry in throttle_cfs_rq() According to the comments, we need to test if this is the first throttled task, however, list_empty() tests on the entry cfs_rq->throttled_list, not the head, this is wrong. This is a bug because we don't re-init the list entry after removing it from the list, so list_empty() could return false even if the list is really empty. Signed-off-by: Cong Wang Signed-off-by: Cong Wang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ben Segall Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435174907-432-1-git-send-email-xiyou.wangcong@gmail.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 65c8f3e..d113c3b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); raw_spin_lock(&cfs_b->lock); - empty = list_empty(&cfs_rq->throttled_list); + empty = list_empty(&cfs_b->throttled_cfs_rq); /* * Add to the _head_ of the list, so that an already-started -- cgit v0.10.2 From 9ab402aed38b95d9ce453108622be0fc6f167568 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 17 Jun 2015 17:52:43 +0300 Subject: ARM: dts: am57xx-beagle-x15: Provide supply for usb2_phy2 Without this USB2 breaks if USB1 is disabled or USB1 initializes after USB2 e.g. due to deferred probing. 
Fixes: 5a0f93c6576a ("ARM: dts: Add am57xx-beagle-x15") Signed-off-by: Roger Quadros Cc: stable@vger.kernel.org (v3.19+) Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index a42cc37..a63bf78 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -605,6 +605,10 @@ phy-supply = <&ldousb_reg>; }; +&usb2_phy2 { + phy-supply = <&ldousb_reg>; +}; + &usb1 { dr_mode = "host"; pinctrl-names = "default"; -- cgit v0.10.2 From 22a5dc10e3f8fb8370748ea19dc4e3e1620d8296 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 30 Jun 2015 15:04:54 +0300 Subject: ARM: dts: am4372.dtsi: disable rfbi When DSS nodes were added to am4372.dtsi, the rfbi node was not marked as disabled. This should have been done, as the rule of thumb is to disable all DSS nodes that are not used, and especially rfbi, as we don't have a driver for rfbi. Signed-off-by: Tomi Valkeinen Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 9521a38..ade28c79 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -947,6 +947,7 @@ ti,hwmods = "dss_rfbi"; clocks = <&disp_clk>; clock-names = "fck"; + status = "disabled"; }; }; -- cgit v0.10.2 From d0f77d4d04b222a817925d33ba3589b190bfa863 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 2 Jul 2015 12:09:33 +0300 Subject: x86/init: Clear 'init_level4_pgt' earlier Currently x86_64_start_kernel() has two KASAN related function calls. The first call maps shadow to early_level4_pgt, the second maps shadow to init_level4_pgt. If we move clear_page(init_level4_pgt) earlier, we could hide KASAN low level detail from generic x86_64 initialization code. The next patch will do it. Signed-off-by: Andrey Ryabinin Cc: # 4.0+ Cc: Alexander Popov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-2-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5a46681..65c8985 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -166,6 +166,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) /* clear bss before set_intr_gate with early_idt_handler */ clear_bss(); + clear_page(init_level4_pgt); + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) set_intr_gate(i, early_idt_handler_array[i]); load_idt((const struct desc_ptr *)&idt_descr); @@ -177,7 +179,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) */ load_ucode_bsp(); - clear_page(init_level4_pgt); /* set init_level4_pgt kernel high mapping*/ init_level4_pgt[511] = early_level4_pgt[511]; -- cgit v0.10.2 From 5d5aa3cfca5cf74cd928daf3674642e6004328d1 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Thu, 2 Jul 2015 12:09:34 +0300 Subject: x86/kasan: Fix KASAN shadow region page tables Currently KASAN shadow region page tables created without respect of physical offset (phys_base). This causes kernel halt when phys_base is not zero. So let's initialize KASAN shadow region page tables in kasan_early_init() using __pa_nodebug() which considers phys_base. This patch also separates x86_64_start_kernel() from KASAN low level details by moving kasan_map_early_shadow(init_level4_pgt) into kasan_early_init(). 
Remove the comment before clear_bss() which stopped bringing much profit to the code readability. Otherwise describing all the new order dependencies would be too verbose. Signed-off-by: Alexander Popov Signed-off-by: Andrey Ryabinin Cc: # 4.0+ Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-3-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 8b22422..74a2a8d 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -14,15 +14,11 @@ #ifndef __ASSEMBLY__ -extern pte_t kasan_zero_pte[]; -extern pte_t kasan_zero_pmd[]; -extern pte_t kasan_zero_pud[]; - #ifdef CONFIG_KASAN -void __init kasan_map_early_shadow(pgd_t *pgd); +void __init kasan_early_init(void); void __init kasan_init(void); #else -static inline void kasan_map_early_shadow(pgd_t *pgd) { } +static inline void kasan_early_init(void) { } static inline void kasan_init(void) { } #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 65c8985..f129a9a 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -161,13 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) /* Kill off the identity-map trampoline */ reset_early_page_tables(); - kasan_map_early_shadow(early_level4_pgt); - - /* clear bss before set_intr_gate with early_idt_handler */ clear_bss(); clear_page(init_level4_pgt); + kasan_early_init(); + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) set_intr_gate(i, early_idt_handler_array[i]); load_idt((const struct desc_ptr *)&idt_descr); @@ -182,8 +181,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) /* set init_level4_pgt kernel high mapping*/ init_level4_pgt[511] = early_level4_pgt[511]; - kasan_map_early_shadow(init_level4_pgt); - x86_64_start_reservations(real_mode_data); } diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e5c27f7..1d40ca8 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -516,38 +516,9 @@ ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 -#ifdef CONFIG_KASAN -#define FILL(VAL, COUNT) \ - .rept (COUNT) ; \ - .quad (VAL) ; \ - .endr - -NEXT_PAGE(kasan_zero_pte) - FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512) -NEXT_PAGE(kasan_zero_pmd) - FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512) -NEXT_PAGE(kasan_zero_pud) - FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512) - -#undef FILL -#endif - - #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE -#ifdef CONFIG_KASAN -/* - * This page used as early shadow. We don't use empty_zero_page - * at early stages, stack instrumentation could write some garbage - * to this page. - * Latter we reuse it as zero shadow for large ranges of memory - * that allowed to access, but not instrumented by kasan - * (vmalloc/vmemmap ...). 
- */ -NEXT_PAGE(kasan_zero_page) - .skip PAGE_SIZE -#endif diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 4860906..0e4a05f 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -11,7 +11,19 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern struct range pfn_mapped[E820_X_MAX]; -extern unsigned char kasan_zero_page[PAGE_SIZE]; +static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; +static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; +static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; + +/* + * This page used as early shadow. We don't use empty_zero_page + * at early stages, stack instrumentation could write some garbage + * to this page. + * Latter we reuse it as zero shadow for large ranges of memory + * that allowed to access, but not instrumented by kasan + * (vmalloc/vmemmap ...). + */ +static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; static int __init map_range(struct range *range) { @@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start, pgd_clear(pgd_offset_k(start)); } -void __init kasan_map_early_shadow(pgd_t *pgd) +static void __init kasan_map_early_shadow(pgd_t *pgd) { int i; unsigned long start = KASAN_SHADOW_START; @@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = { }; #endif +void __init kasan_early_init(void) +{ + int i; + pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL; + pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE; + pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE; + + for (i = 0; i < PTRS_PER_PTE; i++) + kasan_zero_pte[i] = __pte(pte_val); + + for (i = 0; i < PTRS_PER_PMD; i++) + kasan_zero_pmd[i] = __pmd(pmd_val); + + for (i = 0; i < PTRS_PER_PUD; i++) + kasan_zero_pud[i] = __pud(pud_val); + + kasan_map_early_shadow(early_level4_pgt); + kasan_map_early_shadow(init_level4_pgt); +} + void __init kasan_init(void) { int i; -- cgit v0.10.2 From 241d2c54c62fa0939fc9a9512b48ac3434e90a89 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 2 Jul 2015 12:09:35 +0300 Subject: x86/kasan: Flush TLBs after switching CR3 load_cr3() doesn't cause tlb_flush if PGE enabled. This may cause tons of false positive reports spamming the kernel to death. To fix this __flush_tlb_all() should be called explicitly after CR3 changed. Signed-off-by: Andrey Ryabinin Cc: # 4.0+ Cc: Alexander Popov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-4-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 0e4a05f..5d26642 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -208,6 +208,7 @@ void __init kasan_init(void) memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt)); load_cr3(early_level4_pgt); + __flush_tlb_all(); clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); @@ -234,5 +235,6 @@ void __init kasan_init(void) memset(kasan_zero_page, 0, PAGE_SIZE); load_cr3(init_level4_pgt); + __flush_tlb_all(); init_task.kasan_depth = 0; } -- cgit v0.10.2 From d4f86beacc21d538dc41e1fc75a22e084f547edf Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 2 Jul 2015 12:09:36 +0300 Subject: x86/kasan: Fix boot crash on AMD processors While populating zero shadow wrong bits in upper level page tables used. 
__PAGE_KERNEL_RO that was used for pgd/pud/pmd has _PAGE_BIT_GLOBAL set. Global bit is present only in the lowest level of the page translation hierarchy (ptes), and it should be zero in upper levels. This bug seems doesn't cause any troubles on Intel cpus, while on AMDs it cause kernel crash on boot. Use _KERNPG_TABLE bits for pgds/puds/pmds to fix this. Reported-by: Borislav Petkov Signed-off-by: Andrey Ryabinin Cc: # 4.0+ Cc: Alexander Popov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-5-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 5d26642..9a54dbe 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -85,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr, while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { WARN_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PMD_SIZE; pmd = pmd_offset(pud, addr); } @@ -111,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr, while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { WARN_ON(!pud_none(*pud)); set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PUD_SIZE; pud = pud_offset(pgd, addr); } @@ -136,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end) while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { WARN_ON(!pgd_none(*pgd)); set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) - | __PAGE_KERNEL_RO)); + | _KERNPG_TABLE)); addr += PGDIR_SIZE; pgd = pgd_offset_k(addr); } -- cgit v0.10.2 From 8515522949951d81fe2d06c0a3292f171f2b8ec4 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 2 Jul 2015 12:09:37 +0300 Subject: x86/kasan: Add message about KASAN being initialized Print informational message to tell user that kernel runs with KASAN enabled. Add a "kasan: " prefix to all messages in kasan_init_64.c. Signed-off-by: Andrey Ryabinin Cc: Alexander Popov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-6-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 9a54dbe..e1840f3 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -1,3 +1,4 @@ +#define pr_fmt(fmt) "kasan: " fmt #include #include #include @@ -237,4 +238,6 @@ void __init kasan_init(void) load_cr3(init_level4_pgt); __flush_tlb_all(); init_task.kasan_depth = 0; + + pr_info("Kernel address sanitizer initialized\n"); } -- cgit v0.10.2 From d6f2d75a7ae06ffd793bb504c4f0d1665548cffc Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 2 Jul 2015 12:09:38 +0300 Subject: x86/kasan: Move KASAN_SHADOW_OFFSET to the arch Kconfig KASAN_SHADOW_OFFSET is purely arch specific setting, so it should be in arch's Kconfig file. 
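To make the AMD boot-crash fix above concrete: on x86, only leaf PTEs may carry the global bit, so table-level pgd/pud/pmd entries want _KERNPG_TABLE rather than __PAGE_KERNEL_RO. A stand-alone sketch with the flag masks written out (bit positions and mask compositions here are a simplified, assumed subset for illustration):

    #include <stdio.h>

    /* Illustrative subset of x86 page-table flag bits */
    #define _PAGE_PRESENT    (1ULL << 0)
    #define _PAGE_RW         (1ULL << 1)
    #define _PAGE_ACCESSED   (1ULL << 5)
    #define _PAGE_DIRTY      (1ULL << 6)
    #define _PAGE_GLOBAL     (1ULL << 8)
    #define _PAGE_NX         (1ULL << 63)

    /* Flags for table entries that point at a lower-level table */
    #define _KERNPG_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
    /* Flags meant for leaf mappings of read-only kernel pages */
    #define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
                              _PAGE_GLOBAL | _PAGE_NX)

    int main(void)
    {
        printf("_KERNPG_TABLE    has GLOBAL: %d\n",
               !!(_KERNPG_TABLE & _PAGE_GLOBAL));     /* 0: safe in upper levels */
        printf("__PAGE_KERNEL_RO has GLOBAL: %d\n",
               !!(__PAGE_KERNEL_RO & _PAGE_GLOBAL));  /* 1: leaf PTEs only       */
        return 0;
    }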
Signed-off-by: Andrey Ryabinin Cc: Alexander Popov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Paul Bolle Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1435828178-10975-7-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 55bced1..0b2929f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -254,6 +254,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y +config KASAN_SHADOW_OFFSET + hex + depends on KASAN + default 0xdffffc0000000000 + config HAVE_INTEL_TXT def_bool y depends on INTEL_IOMMU && ACPI diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 777eda7..39f24d6 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -18,10 +18,6 @@ config KASAN For better error detection enable CONFIG_STACKTRACE, and add slub_debug=U to boot cmdline. -config KASAN_SHADOW_OFFSET - hex - default 0xdffffc0000000000 if X86_64 - choice prompt "Instrumentation type" depends on KASAN -- cgit v0.10.2 From 1db875631f8d5bbf497f67e47f254eece888d51d Mon Sep 17 00:00:00 2001 From: Zhu Guihua Date: Fri, 3 Jul 2015 17:37:18 +0800 Subject: x86/espfix: Add 'cpu' parameter to init_espfix_ap() Add a CPU index parameter to init_espfix_ap(), so that the parameter could be propagated to the function for espfix page allocation. Signed-off-by: Zhu Guihua Cc: Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/cde3fcf1b3211f3f03feb1a995bce3fee850f0fc.1435824469.git.zhugh.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 99efebb..ca3ce9a 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); -extern void init_espfix_ap(void); +extern void init_espfix_ap(int cpu); #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index f5d0730..1fb0e00 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -131,12 +131,12 @@ void __init init_espfix_bsp(void) init_espfix_random(); /* The rest is the same as for any other processor */ - init_espfix_ap(); + init_espfix_ap(0); } -void init_espfix_ap(void) +void init_espfix_ap(int cpu) { - unsigned int cpu, page; + unsigned int page; unsigned long addr; pud_t pud, *pud_p; pmd_t pmd, *pmd_p; @@ -149,7 +149,6 @@ void init_espfix_ap(void) if (likely(this_cpu_read(espfix_stack))) return; /* Already initialized */ - cpu = smp_processor_id(); addr = espfix_base_addr(cpu); page = cpu/ESPFIX_STACKS_PER_PAGE; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8add66b..6f5abac 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -242,7 +242,7 @@ static void notrace start_secondary(void *unused) * Enable the espfix hack for this CPU */ #ifdef CONFIG_X86_ESPFIX64 - init_espfix_ap(); + init_espfix_ap(smp_processor_id()); #endif /* -- cgit v0.10.2 From 20d5e4a9cd453991e2590a4c25230a99b42dee0c Mon Sep 17 00:00:00 2001 From: Zhu Guihua Date: Fri, 3 Jul 2015 17:37:19 +0800 Subject: x86/espfix: Init espfix on the boot CPU side As we alloc pages with GFP_KERNEL in init_espfix_ap() which is called before we enable local irqs, so the lockdep sub-system would (correctly) trigger a warning about the 
potentially blocking API. So we allocate them on the boot CPU side when the secondary CPU is brought up by the boot CPU, and hand them over to the secondary CPU. And we use alloc_pages_node() with the secondary CPU's node, to make sure the espfix stack is NUMA-local to the CPU that is going to use it. Signed-off-by: Zhu Guihua Cc: Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/c97add2670e9abebb90095369f0cfc172373ac94.1435824469.git.zhugh.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 1fb0e00..ce95676 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -141,12 +141,12 @@ void init_espfix_ap(int cpu) pud_t pud, *pud_p; pmd_t pmd, *pmd_p; pte_t pte, *pte_p; - int n; + int n, node; void *stack_page; pteval_t ptemask; /* We only have to do this once... */ - if (likely(this_cpu_read(espfix_stack))) + if (likely(per_cpu(espfix_stack, cpu))) return; /* Already initialized */ addr = espfix_base_addr(cpu); @@ -164,12 +164,15 @@ void init_espfix_ap(int cpu) if (stack_page) goto unlock_done; + node = cpu_to_node(cpu); ptemask = __supported_pte_mask; pud_p = &espfix_pud_page[pud_index(addr)]; pud = *pud_p; if (!pud_present(pud)) { - pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); + struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); + + pmd_p = (pmd_t *)page_address(page); pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PUD_CLONES; n++) @@ -179,7 +182,9 @@ void init_espfix_ap(int cpu) pmd_p = pmd_offset(&pud, addr); pmd = *pmd_p; if (!pmd_present(pmd)) { - pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); + struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); + + pte_p = (pte_t *)page_address(page); pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PMD_CLONES; n++) @@ -187,7 +192,7 @@ void init_espfix_ap(int cpu) } pte_p = pte_offset_kernel(&pmd, addr); - stack_page = (void *)__get_free_page(GFP_KERNEL); + stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); for (n = 0; n < ESPFIX_PTE_CLONES; n++) set_pte(&pte_p[n*PTE_STRIDE], pte); @@ -198,7 +203,7 @@ void init_espfix_ap(int cpu) unlock_done: mutex_unlock(&espfix_init_mutex); done: - this_cpu_write(espfix_stack, addr); - this_cpu_write(espfix_waddr, (unsigned long)stack_page - + (addr & ~PAGE_MASK)); + per_cpu(espfix_stack, cpu) = addr; + per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page + + (addr & ~PAGE_MASK); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6f5abac..0bd8c1d 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -239,13 +239,6 @@ static void notrace start_secondary(void *unused) check_tsc_sync_target(); /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 - init_espfix_ap(smp_processor_id()); -#endif - - /* * We need to hold vector_lock so there the set of online cpus * does not change while we are assigning vectors to cpus. Holding * this lock ensures we don't half assign or remove an irq from a cpu. 
@@ -854,6 +847,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) initial_code = (unsigned long)start_secondary; stack_start = idle->thread.sp; + /* + * Enable the espfix hack for this CPU + */ +#ifdef CONFIG_X86_ESPFIX64 + init_espfix_ap(cpu); +#endif + /* So we see what's up */ announce_cpu(cpu, apicid); -- cgit v0.10.2 From 1c7518794a3647eb345d59ee52844e8a40405198 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Fri, 3 Jul 2015 14:51:32 +0100 Subject: dm btree: silence lockdep lock inversion in dm_btree_del() Allocate memory using GFP_NOIO when deleting a btree. dm_btree_del() can be called via an ioctl and we don't want to recurse into the FS or block layer. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 200ac12..fdd3793 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) int r; struct del_stack *s; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc(sizeof(*s), GFP_NOIO); if (!s) return -ENOMEM; s->info = info; -- cgit v0.10.2 From 827a82ff399523a954253dfea401af748640f0f4 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 6 Jul 2015 10:14:34 -0400 Subject: x86/earlyprintk: Allow early_printk() to use console style parameters like '115200n8' When I enable early_printk on a kernel, I cut and paste the console= input and add to earlyprintk parameter. But I notice recently that ktest has not been detecting triple faults. The way it detects it, is by seeing the kernel banner "Linux version .." with a different kernel version pop up. Then I noticed that early printk was no longer working on my console, which was why ktest was not seeing it. I bisected it down and it was added to 4.0 with this commit: ea9e9d802902 ("Specify PCI based UART for earlyprintk") because it converted the simple_strtoul() that converts the baud number into a kstrtoul(). The problem with this is, I had as my baud rate, 115200n8 (acceptable for console=ttyS0), but because of the "n8", the kstrtoul() doesn't parse the baud rate and returns an error, which sets the baud rate to the default 9600. This explains the garbage on my screen. Now, earlyprintk= kernel parameter does not say it accepts that format. Thus, one answer would simply be me changing my kernel parameters to remove the "n8" since it isn't parsed anyway. But I wonder if other people run into this, and it seems strange that the two consoles for serial accepts different input. I could also extend this to have earlyprintk do something with that "n8" or whatever it has and have it match the console parsing (which, BTW, still uses simple_strtoul(), as I guess it has to). This patch just makes my old kernel parameter parsing work like it use to. Although, simple_strtoull() is considered obsolete, it is the only standard string parsing function that parses a number that is attached to text. Ironically, commit ea9e9d802902 also added several calls to simple_strtoul()! Signed-off-by: Steven Rostedt Cc: Alan Cox Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Borislav Petkov Cc: Brian Gerst Cc: David Cohen Cc: Denys Vlasenko Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stuart R. Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20150706101434.5f6a351b@gandalf.local.home [ Cleaned it up a bit. 
] Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 89427d8..eec40f5 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -175,7 +175,9 @@ static __init void early_serial_init(char *s) } if (*s) { - if (kstrtoul(s, 0, &baud) < 0 || baud == 0) + baud = simple_strtoull(s, &e, 0); + + if (baud == 0 || s == e) baud = DEFAULT_BAUD; } -- cgit v0.10.2 From 4b59246d9aeaac71f5f7a29459cd2abe32c527e6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 6 Jul 2015 17:08:09 +0200 Subject: arm64: remove another unnecessary libfdt include path Patch 63a4aea55670 ("of: clean-up unnecessary libfdt include paths") removed all explicit libfdt include paths, since those are no longer necessary after the latest dtc upgrade. However, this one snuck in during the same merge window. Remove it. Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 9d84feb..773d37a 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -4,5 +4,3 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_ARM64_PTDUMP) += dump.o - -CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/ -- cgit v0.10.2 From 3446af31b721d839c69f8fc70b8a434df6176a64 Mon Sep 17 00:00:00 2001 From: Suneel Garapati Date: Mon, 29 Jun 2015 07:02:08 +0200 Subject: arm64: defconfig: Add Ceva ahci to the defconfig The Ceva ahci controller is available on the Xilinx Zynq UltraScale+ MPSoC. Signed-off-by: Suneel Garapati Signed-off-by: Michal Simek [catalin.marinas@arm.com: removed unnecessary defconfig changes] Signed-off-by: Catalin Marinas diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f38c94f..4e17e7e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_CEVA=y CONFIG_AHCI_XGENE=y CONFIG_PATA_PLATFORM=y CONFIG_PATA_OF_PLATFORM=y -- cgit v0.10.2 From 59b750006ae252b90aaab1fc6efcdb64976baa24 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sat, 20 Jun 2015 19:28:23 -0400 Subject: staging: make board support depend on OF_IRQ and CLKDEV_LOOKUP Building allmodconfig for arch/cris currently fails with: drivers/built-in.o: In function `board_staging_register_clock': drivers/staging/board/board.c:131: undefined reference to `clk_add_alias' make: *** [vmlinux] Error 1 The clk_add_alias lives in drivers/clk/clkdev.c and that file is only compiled for CONFIG_CLKDEV_LOOKUP, so it would seem we need to add a dependency on that. Geert also reported seeing this in his build coverage: There seems to be another missing dependency on OF_IRQ: drivers/built-in.o: In function `board_staging_gic_fixup_resources': (.init.text+0x21c2): undefined reference to `irq_create_of_mapping' so we might as well fix that at the same time since it is on the same line. 
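A quick user-space illustration of the earlyprintk parsing point above: strtoull()-style parsing stops at the first non-digit, so a console-style value such as "115200n8" still yields the baud rate and simply leaves the trailing "n8" behind, whereas an all-or-nothing parser rejects the whole string and falls back to 9600. Sketch (plain libc strtoull stands in for simple_strtoull):

    #include <stdio.h>
    #include <stdlib.h>

    #define DEFAULT_BAUD 9600

    static unsigned long parse_baud(const char *s)
    {
        char *e;
        unsigned long baud = strtoull(s, &e, 0);

        /* Fall back to the default only if no digits were consumed at all. */
        if (baud == 0 || s == e)
            baud = DEFAULT_BAUD;
        return baud;
    }

    int main(void)
    {
        printf("\"115200n8\" -> %lu\n", parse_baud("115200n8")); /* 115200 */
        printf("\"115200\"   -> %lu\n", parse_baud("115200"));   /* 115200 */
        printf("\"n8\"       -> %lu\n", parse_baud("n8"));       /* 9600   */
        return 0;
    }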
Cc: Magnus Damm Cc: Simon Horman Cc: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Signed-off-by: Paul Gortmaker Signed-off-by: Chen Gang Cc: Stephen Rothwell Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig index b8ee818..3f287c4 100644 --- a/drivers/staging/board/Kconfig +++ b/drivers/staging/board/Kconfig @@ -1,6 +1,6 @@ config STAGING_BOARD bool "Staging Board Support" - depends on OF_ADDRESS + depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP help Select to enable per-board staging support code. -- cgit v0.10.2 From be9d39881fc4fa39a64b6eed6bab5d9ee5125344 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 2 Jun 2015 13:03:36 -0500 Subject: usb: musb: host: rely on port_mode to call musb_start() Currently, we're calling musb_start() twice for DRD ports in some situations. This has been observed to cause enumeration issues after suspend/resume cycles with AM335x. In order to fix the problem, we just have to fix the check on musb_has_gadget() so that it only returns true if current mode is Host and ignore the fact that we have or not a gadget driver loaded. Fixes: ae44df2e21b5 (usb: musb: call musb_start() only once in OTG mode) Cc: Sebastian Andrzej Siewior Cc: # v3.11+ Tested-by: Sekhar Nori Signed-off-by: Felipe Balbi diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 30842bc..92d5f71 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb) #ifdef CONFIG_USB_MUSB_HOST return 1; #else - if (musb->port_mode == MUSB_PORT_MODE_HOST) - return 1; - return musb->g.dev.driver != NULL; + return musb->port_mode == MUSB_PORT_MODE_HOST; #endif } -- cgit v0.10.2 From b58e6ceef96f2fbcb1698b0a7c7df59fdea0aa32 Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Mon, 29 Jun 2015 11:05:28 +0200 Subject: usb: dwc2: host: allocate qh before atomic enqueue To avoid sleep while atomic bugs, allocate qh before calling dwc2_hcd_urb_enqueue. qh pointer can be used directly now instead of passing ep->hcpriv as double pointer. Acked-by: John Youn Tested-by: Heiko Stuebner Tested-by: Doug Anderson Signed-off-by: Mian Yousaf Kaukab Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index b10377c..80bce71 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -359,7 +359,7 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) /* Caller must hold driver lock */ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, - struct dwc2_hcd_urb *urb, void **ep_handle, + struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, gfp_t mem_flags) { struct dwc2_qtd *qtd; @@ -391,8 +391,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, return -ENOMEM; dwc2_hcd_qtd_init(qtd, urb); - retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, - mem_flags); + retval = dwc2_hcd_qtd_add(hsotg, qtd, qh); if (retval) { dev_err(hsotg->dev, "DWC OTG HCD URB Enqueue failed adding QTD. 
Error status %d\n", @@ -2445,6 +2444,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, u32 tflags = 0; void *buf; unsigned long flags; + struct dwc2_qh *qh; + bool qh_allocated = false; if (dbg_urb(urb)) { dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); @@ -2523,13 +2524,24 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, urb->iso_frame_desc[i].length); urb->hcpriv = dwc2_urb; + qh = (struct dwc2_qh *) ep->hcpriv; + /* Create QH for the endpoint if it doesn't exist */ + if (!qh) { + qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags); + if (!qh) { + retval = -ENOMEM; + goto fail0; + } + ep->hcpriv = qh; + qh_allocated = true; + } spin_lock_irqsave(&hsotg->lock, flags); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval) goto fail1; - retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); + retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, mem_flags); if (retval) goto fail2; @@ -2549,6 +2561,17 @@ fail2: fail1: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + if (qh_allocated) { + struct dwc2_qtd *qtd2, *qtd2_tmp; + + ep->hcpriv = NULL; + dwc2_hcd_qh_unlink(hsotg, qh); + /* Free each QTD in the QH's QTD list */ + list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list, + qtd_list_entry) + dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh); + dwc2_hcd_qh_free(hsotg, qh); + } fail0: kfree(dwc2_urb); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7b5841c..fc10549 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, /* Schedule Queue Functions */ /* Implemented in hcd_queue.c */ extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); +extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, + struct dwc2_hcd_urb *urb, + gfp_t mem_flags); extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); @@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags); + struct dwc2_qh *qh); /* Unlinks and frees a QTD */ static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 9b5c362..3ad63d3 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, * * Return: Pointer to the newly allocated QH, or NULL on error */ -static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, +struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, gfp_t mem_flags) { @@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb) * * @hsotg: The DWC HCD structure * @qtd: The QTD to add - * @qh: Out parameter to return queue head - * @atomic_alloc: Flag to do atomic alloc if needed + * @qh: Queue head to add qtd to * * Return: 0 if successful, negative error code otherwise * - * Finds the correct QH to place the QTD into. If it does not find a QH, it - * will create a new QH. 
If the QH to which the QTD is added is not currently - * scheduled, it is placed into the proper schedule based on its EP type. + * If the QH to which the QTD is added is not currently scheduled, it is placed + * into the proper schedule based on its EP type. */ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags) + struct dwc2_qh *qh) { - struct dwc2_hcd_urb *urb = qtd->urb; - int allocated = 0; int retval; - /* - * Get the QH which holds the QTD-list to insert to. Create QH if it - * doesn't exist. - */ - if (*qh == NULL) { - *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags); - if (*qh == NULL) - return -ENOMEM; - allocated = 1; + if (unlikely(!qh)) { + dev_err(hsotg->dev, "%s: Invalid QH\n", __func__); + retval = -EINVAL; + goto fail; } - retval = dwc2_hcd_qh_add(hsotg, *qh); + retval = dwc2_hcd_qh_add(hsotg, qh); if (retval) goto fail; - qtd->qh = *qh; - list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list); + qtd->qh = qh; + list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); return 0; - fail: - if (allocated) { - struct dwc2_qtd *qtd2, *qtd2_tmp; - struct dwc2_qh *qh_tmp = *qh; - - *qh = NULL; - dwc2_hcd_qh_unlink(hsotg, qh_tmp); - - /* Free each QTD in the QH's QTD list */ - list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list, - qtd_list_entry) - dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp); - - dwc2_hcd_qh_free(hsotg, qh_tmp); - } - return retval; } -- cgit v0.10.2 From b5a468a6aa1ca9176594cd603760d80fa1e8b15e Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Mon, 29 Jun 2015 11:05:29 +0200 Subject: usb: dwc2: host: allocate qtd before atomic enqueue To avoid sleep while atomic bugs, allocate qtd before calling dwc2_hcd_urb_enqueue. No need to pass mem_flags to dwc2_hcd_urb_enqueue any more as no memory allocations are done in it. Acked-by: John Youn Tested-by: Heiko Stuebner Tested-by: Doug Anderson Signed-off-by: Mian Yousaf Kaukab Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 80bce71..f845c41 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -360,9 +360,8 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) /* Caller must hold driver lock */ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, - gfp_t mem_flags) + struct dwc2_qtd *qtd) { - struct dwc2_qtd *qtd; u32 intr_mask; int retval; int dev_speed; @@ -386,9 +385,8 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, return -ENODEV; } - qtd = kzalloc(sizeof(*qtd), mem_flags); if (!qtd) - return -ENOMEM; + return -EINVAL; dwc2_hcd_qtd_init(qtd, urb); retval = dwc2_hcd_qtd_add(hsotg, qtd, qh); @@ -396,7 +394,6 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, "DWC OTG HCD URB Enqueue failed adding QTD. 
Error status %d\n", retval); - kfree(qtd); return retval; } @@ -2446,6 +2443,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, unsigned long flags; struct dwc2_qh *qh; bool qh_allocated = false; + struct dwc2_qtd *qtd; if (dbg_urb(urb)) { dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); @@ -2536,14 +2534,20 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, qh_allocated = true; } + qtd = kzalloc(sizeof(*qtd), mem_flags); + if (!qtd) { + retval = -ENOMEM; + goto fail1; + } + spin_lock_irqsave(&hsotg->lock, flags); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval) - goto fail1; + goto fail2; - retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, mem_flags); + retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); if (retval) - goto fail2; + goto fail3; if (alloc_bandwidth) { dwc2_allocate_bus_bandwidth(hcd, @@ -2555,12 +2559,14 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return 0; -fail2: +fail3: dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); -fail1: +fail2: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + kfree(qtd); +fail1: if (qh_allocated) { struct dwc2_qtd *qtd2, *qtd2_tmp; -- cgit v0.10.2 From cc1e204cb092da8495fe2c24bdc4543c259d6b34 Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Mon, 29 Jun 2015 11:05:30 +0200 Subject: usb: dwc2: embed storage for reg backup in struct dwc2_hsotg Register backup function can be called from atomic context. Instead of using atomic memory pool, embed backup storage space in struct dwc2_hsotg. Also add a valid flag in each struct as NULL pointer can't be used as the content validity check any more. Acked-by: John Youn Tested-by: Heiko Stuebner Tested-by: Doug Anderson Signed-off-by: Mian Yousaf Kaukab Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index e5b546f..c3cc1a7 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup Host regs */ - hr = hsotg->hr_backup; - if (!hr) { - hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL); - if (!hr) { - dev_err(hsotg->dev, "%s: can't allocate host regs\n", - __func__); - return -ENOMEM; - } - - hsotg->hr_backup = hr; - } + hr = &hsotg->hr_backup; hr->hcfg = readl(hsotg->regs + HCFG); hr->haintmsk = readl(hsotg->regs + HAINTMSK); for (i = 0; i < hsotg->core_params->host_channels; ++i) @@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) hr->hprt0 = readl(hsotg->regs + HPRT0); hr->hfir = readl(hsotg->regs + HFIR); + hr->valid = true; return 0; } @@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore host regs */ - hr = hsotg->hr_backup; - if (!hr) { + hr = &hsotg->hr_backup; + if (!hr->valid) { dev_err(hsotg->dev, "%s: no host registers to restore\n", __func__); return -EINVAL; } + hr->valid = false; writel(hr->hcfg, hsotg->regs + HCFG); writel(hr->haintmsk, hsotg->regs + HAINTMSK); @@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup dev regs */ - dr = hsotg->dr_backup; - if (!dr) { - dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL); - if (!dr) { - dev_err(hsotg->dev, "%s: can't allocate device regs\n", - __func__); - return -ENOMEM; - } - - hsotg->dr_backup = dr; - } + dr = &hsotg->dr_backup; dr->dcfg = 
readl(hsotg->regs + DCFG); dr->dctl = readl(hsotg->regs + DCTL); @@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i)); dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i)); } - + dr->valid = true; return 0; } @@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore dev regs */ - dr = hsotg->dr_backup; - if (!dr) { + dr = &hsotg->dr_backup; + if (!dr->valid) { dev_err(hsotg->dev, "%s: no device registers to restore\n", __func__); return -EINVAL; } + dr->valid = false; writel(dr->dcfg, hsotg->regs + DCFG); writel(dr->dctl, hsotg->regs + DCTL); @@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) int i; /* Backup global regs */ - gr = hsotg->gr_backup; - if (!gr) { - gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL); - if (!gr) { - dev_err(hsotg->dev, "%s: can't allocate global regs\n", - __func__); - return -ENOMEM; - } - - hsotg->gr_backup = gr; - } + gr = &hsotg->gr_backup; gr->gotgctl = readl(hsotg->regs + GOTGCTL); gr->gintmsk = readl(hsotg->regs + GINTMSK); @@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) for (i = 0; i < MAX_EPS_CHANNELS; i++) gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i)); + gr->valid = true; return 0; } @@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore global regs */ - gr = hsotg->gr_backup; - if (!gr) { + gr = &hsotg->gr_backup; + if (!gr->valid) { dev_err(hsotg->dev, "%s: no global registers to restore\n", __func__); return -EINVAL; } + gr->valid = false; writel(0xffffffff, hsotg->regs + GINTSTS); writel(gr->gotgctl, hsotg->regs + GOTGCTL); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 53b8de0..0ed87620 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -492,6 +492,7 @@ struct dwc2_gregs_backup { u32 gdfifocfg; u32 dtxfsiz[MAX_EPS_CHANNELS]; u32 gpwrdn; + bool valid; }; /** @@ -521,6 +522,7 @@ struct dwc2_dregs_backup { u32 doepctl[MAX_EPS_CHANNELS]; u32 doeptsiz[MAX_EPS_CHANNELS]; u32 doepdma[MAX_EPS_CHANNELS]; + bool valid; }; /** @@ -538,6 +540,7 @@ struct dwc2_hregs_backup { u32 hcintmsk[MAX_EPS_CHANNELS]; u32 hprt0; u32 hfir; + bool valid; }; /** @@ -705,9 +708,9 @@ struct dwc2_hsotg { struct work_struct wf_otg; struct timer_list wkp_timer; enum dwc2_lx_state lx_state; - struct dwc2_gregs_backup *gr_backup; - struct dwc2_dregs_backup *dr_backup; - struct dwc2_hregs_backup *hr_backup; + struct dwc2_gregs_backup gr_backup; + struct dwc2_dregs_backup dr_backup; + struct dwc2_hregs_backup hr_backup; struct dentry *debug_root; struct debugfs_regset32 *regset; -- cgit v0.10.2 From 43cacb03aabe62158d8e5da876054c7d48fc9963 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 1 Jul 2015 22:03:09 -0500 Subject: usb: dwc3: core: avoid NULL pointer dereference commit 3e10a2ce98d1 ("usb: dwc3: add hsphy_interface property") introduced a possible NULL pointer dereference because dwc->hsphy_interface can be NULL. In order to fix it, all we have to do is guard strncmp() against a NULL argument. 
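A minimal stand-alone illustration of the guard pattern used in the dwc3 fix above: check the pointer before handing it to strncmp(), since the libc string functions may crash on NULL. The values below are just example inputs for an optional, possibly-absent property string:

    #include <stdio.h>
    #include <string.h>

    /* Pick a PHY mode from an optional, possibly-NULL interface string. */
    static const char *pick_hsphy_mode(const char *hsphy_interface)
    {
        if (hsphy_interface && !strncmp(hsphy_interface, "utmi", 4))
            return "UTMI+";
        else if (hsphy_interface && !strncmp(hsphy_interface, "ulpi", 4))
            return "ULPI";
        return "default (property missing or unrecognised)";
    }

    int main(void)
    {
        printf("%s\n", pick_hsphy_mode("utmi"));
        printf("%s\n", pick_hsphy_mode("ulpi"));
        printf("%s\n", pick_hsphy_mode(NULL));  /* safe: the guard short-circuits */
        return 0;
    }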
Fixes: 3e10a2ce98d1 ("usb: dwc3: add hsphy_interface property") Tested-by: Murali Karicheri Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5c110d8..ff5773c 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc) /* Select the HS PHY interface */ switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: - if (!strncmp(dwc->hsphy_interface, "utmi", 4)) { + if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "utmi", 4)) { reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; break; - } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) { + } else if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4)) { reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } else { -- cgit v0.10.2 From 8515bac01a983d277148e4fcc5f235bf603de577 Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Fri, 19 Jun 2015 23:56:34 +0200 Subject: usb: f_mass_storage: limit number of reported LUNs Mass storage function created via configfs always reports eight LUNs to the hosts even if only one LUN has been configured. Adjust the number when the USB function is allocated based on LUNs that user has created. Cc: Tested-by: Gregory CLEMENT Signed-off-by: Michal Nazarewicz Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index d2259c6..f936268 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) return -EINVAL; } - curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); + curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL); if (unlikely(!curlun)) return -ENOMEM; @@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) common->luns = curlun; common->nluns = nluns; - pr_info("Number of LUNs=%d\n", common->nluns); - return 0; } EXPORT_SYMBOL_GPL(fsg_common_set_nluns); @@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi) struct fsg_opts *opts = fsg_opts_from_func_inst(fi); struct fsg_common *common = opts->common; struct fsg_dev *fsg; + unsigned nluns, i; fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); if (unlikely(!fsg)) return ERR_PTR(-ENOMEM); mutex_lock(&opts->lock); + if (!opts->refcnt) { + for (nluns = i = 0; i < FSG_MAX_LUNS; ++i) + if (common->luns[i]) + nluns = i + 1; + if (!nluns) + pr_warn("No LUNS defined, continuing anyway\n"); + else + common->nluns = nluns; + pr_info("Number of LUNs=%u\n", common->nluns); + } opts->refcnt++; mutex_unlock(&opts->lock); + fsg->function.name = FSG_DRIVER_DESC; fsg->function.bind = fsg_bind; fsg->function.unbind = fsg_unbind; -- cgit v0.10.2 From 4088acf1e845aba35f30fb91dee10649edbd0e84 Mon Sep 17 00:00:00 2001 From: Rui Miguel Silva Date: Mon, 18 May 2015 16:02:07 +0100 Subject: usb: gadget: f_fs: do not set cancel function on synchronous {read,write} do not try to set cancel function in synchronous operations in ffs_epfile_{read,write}_iter. 
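For the mass-storage change above, the reported LUN count is derived from the highest slot that actually has a LUN configured rather than the fixed array size. A small user-space sketch of that "highest non-NULL index + 1" scan (array size and element types are illustrative):

    #include <stdio.h>

    #define FSG_MAX_LUNS 8

    int main(void)
    {
        void *luns[FSG_MAX_LUNS] = { NULL };
        unsigned nluns, i;
        int dummy0, dummy2;

        luns[0] = &dummy0;      /* LUN 0 configured              */
        luns[2] = &dummy2;      /* LUN 2 configured, LUN 1 empty */

        /* Report the highest configured slot + 1, not the array size. */
        for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
            if (luns[i])
                nluns = i + 1;

        printf("Number of LUNs=%u (not %u)\n", nluns, FSG_MAX_LUNS);  /* 3, not 8 */
        return 0;
    }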
Cc: # v4.0+ Acked-by: Al Viro Signed-off-by: Rui Miguel Silva Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 45b8c8b..6e7be91 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) @@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) -- cgit v0.10.2 From b4c21f0bdd2c0cd5d5be1bb56f0a28dae5041eed Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Thu, 11 Jun 2015 22:12:11 +0530 Subject: usb: gadget: composite: Fix NULL pointer dereference commit f563d230903210acc ("usb: gadget: composite: add req_match method to usb_function") accesses cdev->config even before set config is invoked causing a NULL pointer dereferencing error while running Lecroy Mass Storage Compliance test. Fix it here by accessing cdev->config only if it is non NULL. Fixes: commit f563d230903210acc ("usb: gadget: composite: add req_match method to usb_function"). Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 4e3447b..58b4657 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1758,10 +1758,13 @@ unknown: * take such requests too, if that's ever needed: to work * in config 0, etc. */ - list_for_each_entry(f, &cdev->config->functions, list) - if (f->req_match && f->req_match(f, ctrl)) - goto try_fun_setup; - f = NULL; + if (cdev->config) { + list_for_each_entry(f, &cdev->config->functions, list) + if (f->req_match && f->req_match(f, ctrl)) + goto try_fun_setup; + f = NULL; + } + switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) -- cgit v0.10.2 From 2184fe636b4e310cba52251971cee82cb2407a68 Mon Sep 17 00:00:00 2001 From: Takeshi Yoshimura Date: Sun, 14 Jun 2015 16:09:23 +0900 Subject: usb: gadget: udc: fix free_irq() after request_irq() failed My static checker detected the mistake. I fix this by changing "goto err_irq" to "goto err_req". The label err_irq is not used now so this patch also removes it. 
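The fotg210 fix above is the usual rule for kernel-style error ladders: jump to the label that undoes only what has already succeeded, so a failed request_irq() must never fall through a free_irq(). A generic user-space sketch of the pattern, with malloc() standing in for the real resources (in an actual probe the resources stay held on success rather than being freed):

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(int fail_irq, int fail_add)
    {
        void *regs, *irq;

        regs = malloc(16);                  /* e.g. ioremap()     */
        if (!regs)
            return -1;

        irq = fail_irq ? NULL : malloc(16); /* e.g. request_irq() */
        if (!irq)
            goto err_req;   /* correct: there is no IRQ to free yet */

        if (fail_add)                       /* e.g. usb_add_gadget_udc() */
            goto err_add;

        printf("probe ok\n");               /* resources stay held here  */
        return 0;

    err_add:
        free(irq);                          /* free_irq() */
    err_req:
        free(regs);                         /* iounmap()  */
        return -1;
    }

    int main(void)
    {
        probe(1, 0);   /* request_irq failure: unwinds regs only */
        probe(0, 1);   /* later failure: unwinds irq, then regs  */
        return 0;
    }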
Signed-off-by: Takeshi Yoshimura Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index e547ea7..1137e33 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) udc_name, fotg210); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); - goto err_irq; + goto err_req; } ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); @@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev) return 0; err_add_udc: -err_irq: free_irq(ires->start, fotg210); err_req: -- cgit v0.10.2 From 543aa4867d4a2dff5fc11e1b688197ee3bad7f89 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Mon, 15 Jun 2015 04:37:00 +0000 Subject: usb: phy: mxs: suspend to RAM causes NULL pointer dereference Triggering suspend to RAM via sysfs on a i.MX28 causes a NULL pointer dereference. This patch avoids the oops in mxs_phy_get_vbus_status() by aborting since there is no syscon available. Signed-off-by: Stefan Wahren Fixes: efdbd3a5d6e ("usb: phy: mxs: do not set PWD.RXPWD1PT1 for low speed connection") CC: # 4.0 Acked-by: Peter Chen Signed-off-by: Felipe Balbi diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 8f7cb06..3fcc048 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) { unsigned int vbus_value; + if (!mxs_phy->regmap_anatop) + return false; + if (mxs_phy->port_id == 0) regmap_read(mxs_phy->regmap_anatop, ANADIG_USB1_VBUS_DET_STAT, -- cgit v0.10.2 From b2e2c94b878be2500d4d42c1f52a2fa1fe7648b4 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Fri, 3 Jul 2015 14:02:29 +0200 Subject: usb: gadget: f_midi: fix error recovery path In case kstrdup() fails the resources to release are midi->in_port[] and midi. No cards have been registered, so no need to unregister any. Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 6316aa5..ad50a67 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) if (opts->id && !midi->id) { status = -ENOMEM; mutex_unlock(&opts->lock); - goto kstrdup_fail; + goto setup_fail; } midi->in_ports = opts->in_ports; midi->out_ports = opts->out_ports; @@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) return &midi->func; -kstrdup_fail: - f_midi_unregister_card(midi); setup_fail: for (--i; i >= 0; i--) kfree(midi->in_port[i]); -- cgit v0.10.2 From b78fb51b68884ba9b8541a2f1914e8b6d054474c Mon Sep 17 00:00:00 2001 From: "qipeng.zha" Date: Tue, 7 Jul 2015 00:04:45 +0800 Subject: intel_pmc_ipc: Fix compiler casting warnings Avoid casting variables to different sizes due to different compilers and settings. 
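On the intel_pmc_ipc cleanup above: storing an MMIO/IO base address in a pointer forces casts whose width depends on the compiler and configuration, while an integer type sized for physical addresses needs none. A tiny sketch of the idea (resource_size_t is typedef'd locally here as an assumption; in the kernel it comes from the core headers):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    typedef uint64_t resource_size_t;   /* assumed width for this sketch */

    struct fake_resource {
        resource_size_t start;
        resource_size_t end;
    };

    struct ipc_dev {
        resource_size_t acpi_io_base;   /* was: void *acpi_io_base; */
        int acpi_io_size;
    };

    int main(void)
    {
        struct fake_resource res = { .start = 0xfed03000, .end = 0xfed03fff };
        struct ipc_dev ipc;

        /* No (void *) or (resource_size_t) casts needed in either direction. */
        ipc.acpi_io_base = res.start;
        ipc.acpi_io_size = (int)(res.end - res.start + 1);

        printf("io res: %" PRIx64 " %x\n",
               (uint64_t)ipc.acpi_io_base, ipc.acpi_io_size);
        return 0;
    }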
Reported-by: Fengguang Wu Signed-off-by: qipeng.zha Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index d734763..69d43b8 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev { struct completion cmd_complete; /* The following PMC BARs share the same ACPI device with the IPC */ - void *acpi_io_base; + resource_size_t acpi_io_base; int acpi_io_size; struct platform_device *tco_dev; /* gcr */ - void *gcr_base; + resource_size_t gcr_base; int gcr_size; /* punit */ - void *punit_base; + resource_size_t punit_base; int punit_size; - void *punit_base2; + resource_size_t punit_base2; int punit_size2; struct platform_device *punit_dev; } ipcdev; @@ -480,11 +480,11 @@ static int ipc_create_punit_device(void) pdev->dev.parent = ipcdev.dev; res = punit_res; - res->start = (resource_size_t)ipcdev.punit_base; + res->start = ipcdev.punit_base; res->end = res->start + ipcdev.punit_size - 1; res = punit_res + PUNIT_RESOURCE_INTER; - res->start = (resource_size_t)ipcdev.punit_base2; + res->start = ipcdev.punit_base2; res->end = res->start + ipcdev.punit_size2 - 1; ret = platform_device_add_resources(pdev, punit_res, @@ -522,15 +522,15 @@ static int ipc_create_tco_device(void) pdev->dev.parent = ipcdev.dev; res = tco_res + TCO_RESOURCE_ACPI_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET; + res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET; res->end = res->start + TCO_REGS_SIZE - 1; res = tco_res + TCO_RESOURCE_SMI_EN_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET; + res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET; res->end = res->start + SMI_EN_SIZE - 1; res = tco_res + TCO_RESOURCE_GCR_MEM; - res->start = (resource_size_t)ipcdev.gcr_base; + res->start = ipcdev.gcr_base; res->end = res->start + ipcdev.gcr_size - 1; ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); @@ -589,7 +589,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.acpi_io_base = (void *)res->start; + ipcdev.acpi_io_base = res->start; ipcdev.acpi_io_size = size; dev_info(&pdev->dev, "io res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -601,7 +601,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base = (void *)res->start; + ipcdev.punit_base = res->start; ipcdev.punit_size = size; dev_info(&pdev->dev, "punit data res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -613,7 +613,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base2 = (void *)res->start; + ipcdev.punit_base2 = res->start; ipcdev.punit_size2 = size; dev_info(&pdev->dev, "punit interface res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -637,7 +637,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) } ipcdev.ipc_base = addr; - ipcdev.gcr_base = (void *)(res->start + size); + ipcdev.gcr_base = res->start + size; ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE; dev_info(&pdev->dev, "ipc res: %llx %x\n", (long long)res->start, (int)resource_size(res)); -- cgit v0.10.2 From 6d451367bfa16fc103604bacd258f534c65d1540 Mon Sep 17 00:00:00 2001 From: Hai Li Date: Thu, 25 Jun 2015 18:35:33 -0400 Subject: clk: qcom: Use parent rate when set rate to pixel RCG clock Since the parent rate has been 
recalculated, pixel RCG clock should rely on it to find the correct M/N values during set_rate, instead of calling __clk_round_rate() to its parent again. Signed-off-by: Hai Li Tested-by: Archit Taneja Fixes: 99cbd064b059 ("clk: qcom: Support display RCG clocks") [sboyd@codeaurora.org: Silenced unused parent variable warning] Signed-off-by: Stephen Boyd diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index b95d17f..92936f0 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct freq_tbl f = *rcg->freq_tbl; const struct frac_entry *frac = frac_table_pixel; - unsigned long request, src_rate; + unsigned long request; int delta = 100000; u32 mask = BIT(rcg->hid_width) - 1; u32 hid_div; - int index = qcom_find_src_index(hw, rcg->parent_map, f.src); - struct clk *parent = clk_get_parent_by_index(hw->clk, index); for (; frac->num; frac++) { request = (rate * frac->den) / frac->num; - src_rate = __clk_round_rate(parent, request); - if ((src_rate < (request - delta)) || - (src_rate > (request + delta))) + if ((parent_rate < (request - delta)) || + (parent_rate > (request + delta))) continue; regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, -- cgit v0.10.2 From c14bada8f71eef63d4465aa8e3a479104a06e7c7 Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Tue, 23 Jun 2015 16:09:21 +0200 Subject: drivers: clk: st: Remove unused code Remove this duplicated code due to a bad copy / paste. Signed-off-by: Gabriel Fernandez Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index e94197f..099ed60 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -1082,10 +1082,6 @@ static const struct of_device_id quadfs_of_match[] = { .compatible = "st,stih407-quadfs660-D", .data = &st_fs660c32_D_407 }, - { - .compatible = "st,stih407-quadfs660-D", - .data = (void *)&st_fs660c32_D_407 - }, {} }; -- cgit v0.10.2 From c4d339c69f91c20def1366c1db8af6ac7a3013a9 Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Tue, 23 Jun 2015 16:09:22 +0200 Subject: drivers: clk: st: Fix FSYN channel values This patch fixes the value for disabling the FSYN channel clock. The 'is_enabled' returned value is also fixed. Signed-off-by: Pankaj Dev Signed-off-by: Gabriel Fernandez Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 099ed60..95851c4 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -489,7 +489,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw) struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); u32 npda = CLKGEN_READ(pll, npda); - return !!npda; + return pll->data->powerup_polarity ? 
!npda : !!npda; } static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, @@ -774,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw) if (fs->lock) spin_lock_irqsave(fs->lock, flags); - CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity); + CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity); if (fs->lock) spin_unlock_irqrestore(fs->lock, flags); -- cgit v0.10.2 From 0f4f2afd4402883a51ad27a1d9e046643bb1e3cb Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Tue, 23 Jun 2015 16:09:23 +0200 Subject: drivers: clk: st: Fix flexgen lock init While proving lock, the following warning happens and it is fixed after initializing lock in the setup function INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.10.27-02861-g39df285-dirty #33 [] (unwind_backtrace+0x0/0xf4) from [] (show_stack+0x10/0x14) [] (show_stack+0x10/0x14) from [] (__lock_acquire+0x900/0xb14) [] (__lock_acquire+0x900/0xb14) from [] (lock_acquire+0x68/0x7c) [] (lock_acquire+0x68/0x7c) from [] (_raw_spin_lock_irqsave+0x48/0x5c) [] (_raw_spin_lock_irqsave+0x48/0x5c) from [] (clk_gate_endisable+0x28/0x88) [] (clk_gate_endisable+0x28/0x88) from [] (clk_gate_enable+0xc/0x14) [] (clk_gate_enable+0xc/0x14) from [] (flexgen_enable+0x28/0x40) [] (flexgen_enable+0x28/0x40) from [] (__clk_enable+0x5c/0x9c) [] (__clk_enable+0x5c/0x9c) from [] (clk_enable+0x18/0x2c) [] (clk_enable+0x18/0x2c) from [] (st_lpc_of_register+0xc0/0x248) [] (st_lpc_of_register+0xc0/0x248) from [] (clocksource_of_init+0x34/0x58) [] (clocksource_of_init+0x34/0x58) from [] (sti_timer_init+0x10/0x18) [] (sti_timer_init+0x10/0x18) from [] (time_init+0x20/0x30) [] (time_init+0x20/0x30) from [] (start_kernel+0x20c/0x2e8) [] (start_kernel+0x20c/0x2e8) from [<40008074>] (0x40008074) Signed-off-by: Giuseppe Cavallaro Signed-off-by: Gabriel Fernandez Fixes: b116517055b7 ("clk: st: STiH407: Support for Flexgen Clocks") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 657ca14..be06d2a 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -303,6 +303,8 @@ static void __init st_of_flexgen_setup(struct device_node *np) if (!rlock) goto err; + spin_lock_init(rlock); + for (i = 0; i < clk_data->clk_num; i++) { struct clk *clk; const char *clk_name; -- cgit v0.10.2 From 18fee4538fe534c53fa95fe9eaa7f96586814e0a Mon Sep 17 00:00:00 2001 From: Pankaj Dev Date: Tue, 23 Jun 2015 16:09:24 +0200 Subject: drivers: clk: st: Add CLK_GET_RATE_NOCACHE flag to clocks Add the CLK_GET_RATE_NOCACHE flag to all the clocks with recalc ops, so that they reflect Hw rate after CPS wake-up when a clk_get_rate() is called Signed-off-by: Pankaj Dev Signed-off-by: Gabriel Fernandez Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index be06d2a..8dd8cce 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -190,7 +190,7 @@ static struct clk *clk_register_flexgen(const char *name, init.name = name; init.ops = &flexgen_ops; - init.flags = CLK_IS_BASIC | flexgen_flags; + init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags; init.parent_names = parent_names; init.num_parents = num_parents; diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 95851c4..99c98c1 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ 
-635,7 +635,7 @@ static struct clk * __init st_clk_register_quadfs_pll( init.name = name; init.ops = quadfs->pll_ops; - init.flags = CLK_IS_BASIC; + init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE; init.parent_names = &parent_name; init.num_parents = 1; diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c index 4fbe6e0..bd30f8d 100644 --- a/drivers/clk/st/clkgen-mux.c +++ b/drivers/clk/st/clkgen-mux.c @@ -237,7 +237,7 @@ static struct clk *clk_register_genamux(const char *name, init.name = name; init.ops = &clkgena_divmux_ops; - init.flags = CLK_IS_BASIC; + init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE; init.parent_names = parent_names; init.num_parents = num_parents; @@ -513,7 +513,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np) 0, &clk_name)) return; - clk = clk_register_divider_table(NULL, clk_name, parent_name, 0, + clk = clk_register_divider_table(NULL, clk_name, parent_name, + CLK_GET_RATE_NOCACHE, reg + data->offset, data->shift, 1, 0, data->table, NULL); if (IS_ERR(clk)) @@ -786,7 +787,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np) &mux->hw, &clk_mux_ops, &div->hw, &clk_divider_ops, &gate->hw, &clk_gate_ops, - data->clk_flags); + data->clk_flags | + CLK_GET_RATE_NOCACHE); if (IS_ERR(clk)) { kfree(gate); kfree(div); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 1065322..72d1c27 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -406,7 +406,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name, init.name = clk_name; init.ops = pll_data->ops; - init.flags = CLK_IS_BASIC; + init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE; init.parent_names = &parent_name; init.num_parents = 1; -- cgit v0.10.2 From 3be6d8ce639d92e60d144fb99dd74a53fe3799bb Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Tue, 23 Jun 2015 16:09:25 +0200 Subject: drivers: clk: st: Fix mux bit-setting for Cortex A9 clocks This patch fixes the mux bit-setting for ClockgenA9. Signed-off-by: Gabriel Fernandez Fixes: 13e6f2da1ddf ("clk: st: STiH407: Support for A9 MUX Clocks") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c index bd30f8d..717c4a9 100644 --- a/drivers/clk/st/clkgen-mux.c +++ b/drivers/clk/st/clkgen-mux.c @@ -583,7 +583,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = { }; static struct clkgen_mux_data stih407_a9_mux_data = { .offset = 0x1a4, - .shift = 1, + .shift = 0, .width = 2, }; -- cgit v0.10.2 From 0294112ee3135fbd15eaa70015af8283642dd970 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 4 Jul 2015 03:09:03 +0200 Subject: ACPI / PNP: Reserve ACPI resources at the fs_initcall_sync stage This effectively reverts the following three commits: 7bc10388ccdd ACPI / resources: free memory on error in add_region_before() 0f1b414d1907 ACPI / PNP: Avoid conflicting resource reservations b9a5e5e18fbf ACPI / init: Fix the ordering of acpi_reserve_resources() (commit b9a5e5e18fbf introduced regressions some of which, but not all, were addressed by commit 0f1b414d1907 and commit 7bc10388ccdd was a fixup on top of the latter) and causes ACPI fixed hardware resources to be reserved at the fs_initcall_sync stage of system initialization. The story is as follows. First, a boot regression was reported due to an apparent resource reservation ordering change after a commit that shouldn't lead to such changes. 
Investigation led to the conclusion that the problem happened because acpi_reserve_resources() was executed at the device_initcall() stage of system initialization which wasn't strictly ordered with respect to driver initialization (and with respect to the initialization of the pcieport driver in particular), so a random change causing the device initcalls to be run in a different order might break things. The response to that was to attempt to run acpi_reserve_resources() as soon as we knew that ACPI would be in use (commit b9a5e5e18fbf). However, that turned out to be too early, because it caused resource reservations made by the PNP system driver to fail on at least one system and that failure was addressed by commit 0f1b414d1907. That fix still turned out to be insufficient, though, because calling acpi_reserve_resources() before the fs_initcall stage of system initialization caused a boot regression to happen on the eCAFE EC-800-H20G/S netbook. That meant that we only could call acpi_reserve_resources() at the fs_initcall initialization stage or later, but then we might just as well call it after the PNP initalization in which case commit 0f1b414d1907 wouldn't be necessary any more. For this reason, the changes made by commit 0f1b414d1907 are reverted (along with a memory leak fixup on top of that commit), the changes made by commit b9a5e5e18fbf that went too far are reverted too and acpi_reserve_resources() is changed into fs_initcall_sync, which will cause it to be executed after the PNP subsystem initialization (which is an fs_initcall) and before device initcalls (including the pcieport driver initialization) which should avoid the initial issue. Link: https://bugzilla.kernel.org/show_bug.cgi?id=100581 Link: http://marc.info/?t=143092384600002&r=1&w=2 Link: https://bugzilla.kernel.org/show_bug.cgi?id=99831 Link: http://marc.info/?t=143389402600001&r=1&w=2 Fixes: b9a5e5e18fbf "ACPI / init: Fix the ordering of acpi_reserve_resources()" Reported-by: Roland Dreier Cc: All applicable Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index c262e4a..3b8963f 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, if (!addr || !length) return; - acpi_reserve_region(addr, length, gas->space_id, 0, desc); + /* Resources are never freed */ + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) + request_region(addr, length, desc); + else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + request_mem_region(addr, length, desc); } -static void __init acpi_reserve_resources(void) +static int __init acpi_reserve_resources(void) { acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, "ACPI PM1a_EVT_BLK"); @@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void) if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) acpi_request_region(&acpi_gbl_FADT.xgpe1_block, acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); + + return 0; } +fs_initcall_sync(acpi_reserve_resources); void acpi_os_printf(const char *fmt, ...) 
{ @@ -1862,7 +1869,6 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { - acpi_reserve_resources(); kacpid_wq = alloc_workqueue("kacpid", 0, 1); kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 10561ce..8244f01 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #ifdef CONFIG_X86 @@ -622,164 +621,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares, return (type & types) ? 0 : 1; } EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type); - -struct reserved_region { - struct list_head node; - u64 start; - u64 end; -}; - -static LIST_HEAD(reserved_io_regions); -static LIST_HEAD(reserved_mem_regions); - -static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags, - char *desc) -{ - unsigned int length = end - start + 1; - struct resource *res; - - res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ? - request_region(start, length, desc) : - request_mem_region(start, length, desc); - if (!res) - return -EIO; - - res->flags &= ~flags; - return 0; -} - -static int add_region_before(u64 start, u64 end, u8 space_id, - unsigned long flags, char *desc, - struct list_head *head) -{ - struct reserved_region *reg; - int error; - - reg = kmalloc(sizeof(*reg), GFP_KERNEL); - if (!reg) - return -ENOMEM; - - error = request_range(start, end, space_id, flags, desc); - if (error) { - kfree(reg); - return error; - } - - reg->start = start; - reg->end = end; - list_add_tail(®->node, head); - return 0; -} - -/** - * acpi_reserve_region - Reserve an I/O or memory region as a system resource. - * @start: Starting address of the region. - * @length: Length of the region. - * @space_id: Identifier of address space to reserve the region from. - * @flags: Resource flags to clear for the region after requesting it. - * @desc: Region description (for messages). - * - * Reserve an I/O or memory region as a system resource to prevent others from - * using it. If the new region overlaps with one of the regions (in the given - * address space) already reserved by this routine, only the non-overlapping - * parts of it will be reserved. - * - * Returned is either 0 (success) or a negative error code indicating a resource - * reservation problem. It is the code of the first encountered error, but the - * routine doesn't abort until it has attempted to request all of the parts of - * the new region that don't overlap with other regions reserved previously. - * - * The resources requested by this routine are never released. - */ -int acpi_reserve_region(u64 start, unsigned int length, u8 space_id, - unsigned long flags, char *desc) -{ - struct list_head *regions; - struct reserved_region *reg; - u64 end = start + length - 1; - int ret = 0, error = 0; - - if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) - regions = &reserved_io_regions; - else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - regions = &reserved_mem_regions; - else - return -EINVAL; - - if (list_empty(regions)) - return add_region_before(start, end, space_id, flags, desc, regions); - - list_for_each_entry(reg, regions, node) - if (reg->start == end + 1) { - /* The new region can be prepended to this one. */ - ret = request_range(start, end, space_id, flags, desc); - if (!ret) - reg->start = start; - - return ret; - } else if (reg->start > end) { - /* No overlap. 
Add the new region here and get out. */ - return add_region_before(start, end, space_id, flags, - desc, ®->node); - } else if (reg->end == start - 1) { - goto combine; - } else if (reg->end >= start) { - goto overlap; - } - - /* The new region goes after the last existing one. */ - return add_region_before(start, end, space_id, flags, desc, regions); - - overlap: - /* - * The new region overlaps an existing one. - * - * The head part of the new region immediately preceding the existing - * overlapping one can be combined with it right away. - */ - if (reg->start > start) { - error = request_range(start, reg->start - 1, space_id, flags, desc); - if (error) - ret = error; - else - reg->start = start; - } - - combine: - /* - * The new region is adjacent to an existing one. If it extends beyond - * that region all the way to the next one, it is possible to combine - * all three of them. - */ - while (reg->end < end) { - struct reserved_region *next = NULL; - u64 a = reg->end + 1, b = end; - - if (!list_is_last(®->node, regions)) { - next = list_next_entry(reg, node); - if (next->start <= end) - b = next->start - 1; - } - error = request_range(a, b, space_id, flags, desc); - if (!error) { - if (next && next->start == b + 1) { - reg->end = next->end; - list_del(&next->node); - kfree(next); - } else { - reg->end = end; - break; - } - } else if (next) { - if (!ret) - ret = error; - - reg = next; - } else { - break; - } - } - - return ret ? ret : error; -} -EXPORT_SYMBOL_GPL(acpi_reserve_region); diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c index 515f338..49c1720 100644 --- a/drivers/pnp/system.c +++ b/drivers/pnp/system.c @@ -7,7 +7,6 @@ * Bjorn Helgaas */ -#include #include #include #include @@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = { {"", 0} }; -#ifdef CONFIG_ACPI -static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc) -{ - u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY; - return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc); -} -#else -static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc) -{ - struct resource *res; - - res = io ? request_region(start, length, desc) : - request_mem_region(start, length, desc); - if (res) { - res->flags &= ~IORESOURCE_BUSY; - return true; - } - return false; -} -#endif - static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) { char *regionid; const char *pnpid = dev_name(&dev->dev); resource_size_t start = r->start, end = r->end; - bool reserved; + struct resource *res; regionid = kmalloc(16, GFP_KERNEL); if (!regionid) return; snprintf(regionid, 16, "pnp %s", pnpid); - reserved = __reserve_range(start, end - start + 1, !!port, regionid); - if (!reserved) + if (port) + res = request_region(start, end - start + 1, regionid); + else + res = request_mem_region(start, end - start + 1, regionid); + if (res) + res->flags &= ~IORESOURCE_BUSY; + else kfree(regionid); /* @@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) * have double reservations. */ dev_info(&dev->dev, "%pR %s reserved\n", r, - reserved ? "has been" : "could not be"); + res ? 
"has been" : "could not be"); } static void reserve_resources_of_dev(struct pnp_dev *dev) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c471dfc..fedfd1b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -309,9 +309,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n, int acpi_resources_are_enforced(void); -int acpi_reserve_region(u64 start, unsigned int length, u8 space_id, - unsigned long flags, char *desc); - #ifdef CONFIG_HIBERNATION void __init acpi_no_s4_hw_signature(void); #endif @@ -507,13 +504,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n, return 0; } -static inline int acpi_reserve_region(u64 start, unsigned int length, - u8 space_id, unsigned long flags, - char *desc) -{ - return -ENXIO; -} - struct acpi_table_header; static inline int acpi_table_parse(char *id, int (*handler)(struct acpi_table_header *)) -- cgit v0.10.2 From d3e13ff3c1aa2403d9a5f371baac088daeb8f56d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Jul 2015 00:31:47 +0200 Subject: ACPI / LPSS: Fix up acpi_lpss_create_device() Fix a return value (which should be a negative error code) and a memory leak (the list allocated by acpi_dev_get_resources() needs to be freed on ioremap() errors too) in acpi_lpss_create_device() introduced by commit 4483d59e29fe 'ACPI / LPSS: check the result of ioremap()'. Fixes: 4483d59e29fe 'ACPI / LPSS: check the result of ioremap()' Reported-by: Dan Carpenter Cc: 4.0+ # 4.0+ Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 569ee09..46b58ab 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev, pdata->mmio_size = resource_size(rentry->res); pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size); - if (!pdata->mmio_base) - goto err_out; break; } acpi_dev_free_resource_list(&resource_list); + if (!pdata->mmio_base) { + ret = -ENOMEM; + goto err_out; + } + pdata->dev_desc = dev_desc; if (dev_desc->setup) -- cgit v0.10.2 From ced53f6d12e497ba210a518e7e76178da987f932 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Mon, 6 Jul 2015 12:08:55 +0200 Subject: dell-laptop: Clear buffer before each SMBIOS call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that before initializing SMBIOS call, the input buffer does not contain any garbage (e.g. values from previous SMBIOS call). This fixes problems with passing undefined/random parameters to SMBIOS functions. 
Signed-off-by: Pali Rohár Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index ed317cc..85fbe7c 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -311,10 +311,15 @@ static DEFINE_MUTEX(buffer_mutex); static int hwswitch_state; +static void clear_buffer(void) +{ + memset(buffer, 0, sizeof(struct calling_interface_buffer)); +} + static void get_buffer(void) { mutex_lock(&buffer_mutex); - memset(buffer, 0, sizeof(struct calling_interface_buffer)); + clear_buffer(); } static void release_buffer(void) @@ -558,6 +563,8 @@ static int dell_rfkill_set(void *data, bool blocked) !(buffer->output[1] & BIT(16))) disable = 1; + clear_buffer(); + buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); @@ -572,6 +579,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, if (status & BIT(0)) { /* Has hw-switch, sync sw_state to BIOS */ int block = rfkill_blocked(rfkill); + clear_buffer(); buffer->input[0] = (1 | (radio << 8) | (block << 16)); dell_send_request(buffer, 17, 11); } else { @@ -774,6 +782,7 @@ static int __init dell_setup_rfkill(void) get_buffer(); dell_send_request(buffer, 17, 11); status = buffer->output[1]; + clear_buffer(); buffer->input[0] = 0x2; dell_send_request(buffer, 17, 11); hwswitch_state = buffer->output[1]; -- cgit v0.10.2 From 715d0cdd5a459908c2b2eb1cfa30d6f6d1d7d710 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Mon, 6 Jul 2015 12:08:56 +0200 Subject: dell-laptop: Check return value of each SMBIOS call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that return value of each SMBIOS call is properly checked and do not continue processing output if the call failed. Signed-off-by: Pali Rohár Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 85fbe7c..efcfc49 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -553,9 +553,15 @@ static int dell_rfkill_set(void *data, bool blocked) int disable = blocked ? 
1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + + if (ret != 0) + goto out; /* If the hardware switch controls this radio, and the hardware switch is disabled, always disable the radio */ @@ -567,9 +573,11 @@ static int dell_rfkill_set(void *data, bool blocked) buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + out: release_buffer(); - return 0; + return dell_smi_error(ret); } /* Must be called with the buffer held */ @@ -598,14 +606,18 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, static void dell_rfkill_query(struct rfkill *rfkill, void *data) { int status; + int ret; get_buffer(); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + release_buffer(); - dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); + if (ret != 0) + return; - release_buffer(); + dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); } static const struct rfkill_ops dell_rfkill_ops = { @@ -618,12 +630,15 @@ static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { int status; + int ret; get_buffer(); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; release_buffer(); + seq_printf(s, "return:\t%d\n", ret); seq_printf(s, "status:\t0x%X\n", status); seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", status & BIT(0)); @@ -702,11 +717,17 @@ static const struct file_operations dell_debugfs_fops = { static void dell_update_rfkill(struct work_struct *ignored) { int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + if (ret != 0) + goto out; + if (wifi_rfkill) { dell_rfkill_update_hw_state(wifi_rfkill, 1, status); dell_rfkill_update_sw_state(wifi_rfkill, 1, status); @@ -720,6 +741,7 @@ static void dell_update_rfkill(struct work_struct *ignored) dell_rfkill_update_sw_state(wwan_rfkill, 3, status); } + out: release_buffer(); } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); @@ -781,6 +803,7 @@ static int __init dell_setup_rfkill(void) get_buffer(); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; clear_buffer(); buffer->input[0] = 0x2; @@ -788,6 +811,10 @@ static int __init dell_setup_rfkill(void) hwswitch_state = buffer->output[1]; release_buffer(); + /* dell wireless info smbios call is not supported */ + if (ret != 0) + return 0; + if (!(status & BIT(0))) { if (force_rfkill) { /* No hwsitch, clear all hw-controlled bits */ @@ -941,47 +968,50 @@ static void dell_cleanup_rfkill(void) static int dell_send_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; + + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); + buffer->input[0] = token; buffer->input[1] = bd->props.brightness; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } - if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 1, 2); else dell_send_request(buffer, 1, 1); - out: + ret = dell_smi_error(buffer->output[0]); + release_buffer(); return ret; } static int dell_get_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; - get_buffer(); - buffer->input[0] = 
find_token_location(BRIGHTNESS_TOKEN); + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } + get_buffer(); + buffer->input[0] = token; if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 0, 2); else dell_send_request(buffer, 0, 1); - ret = buffer->output[1]; + if (buffer->output[0]) + ret = dell_smi_error(buffer->output[0]); + else + ret = buffer->output[1]; - out: release_buffer(); return ret; } @@ -2045,6 +2075,7 @@ static void kbd_led_exit(void) static int __init dell_init(void) { int max_intensity = 0; + int token; int ret; if (!dmi_check_system(dell_device_table)) @@ -2103,13 +2134,15 @@ static int __init dell_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; - get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); - if (buffer->input[0] != -1) { + token = find_token_location(BRIGHTNESS_TOKEN); + if (token != -1) { + get_buffer(); + buffer->input[0] = token; dell_send_request(buffer, 0, 2); - max_intensity = buffer->output[3]; + if (buffer->output[0] == 0) + max_intensity = buffer->output[3]; + release_buffer(); } - release_buffer(); if (max_intensity) { struct backlight_properties props; -- cgit v0.10.2 From 22565ba0bf231eb4267b1d2ebad300d55b28a427 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Mon, 6 Jul 2015 12:08:57 +0200 Subject: dell-laptop: Do not cache hwswitch state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hwswitch state can be changed at runtime, so make sure dell-laptop always knows the current state. It can be modified by the userspace utility smbios-wireless-ctl. Signed-off-by: Pali Rohár Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index efcfc49..aaeeae8 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -309,8 +309,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = { static struct calling_interface_buffer *buffer; static DEFINE_MUTEX(buffer_mutex); -static int hwswitch_state; - static void clear_buffer(void) { memset(buffer, 0, sizeof(struct calling_interface_buffer)); @@ -553,20 +551,30 @@ static int dell_rfkill_set(void *data, bool blocked) int disable = blocked ? 
1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; + int hwswitch; + int status; int ret; get_buffer(); dell_send_request(buffer, 17, 11); ret = buffer->output[0]; + status = buffer->output[1]; if (ret != 0) goto out; + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; + /* If the hardware switch controls this radio, and the hardware switch is disabled, always disable the radio */ - if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) + if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) && + (status & BIT(0)) && !(status & BIT(16))) disable = 1; clear_buffer(); @@ -597,27 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, } static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, - int status) + int status, int hwswitch) { - if (hwswitch_state & (BIT(radio - 1))) + if (hwswitch & (BIT(radio - 1))) rfkill_set_hw_state(rfkill, !(status & BIT(16))); } static void dell_rfkill_query(struct rfkill *rfkill, void *data) { + int radio = ((unsigned long)data & 0xF); + int hwswitch; int status; int ret; get_buffer(); + dell_send_request(buffer, 17, 11); ret = buffer->output[0]; status = buffer->output[1]; + + if (ret != 0 || !(status & BIT(0))) { + release_buffer(); + return; + } + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; + release_buffer(); if (ret != 0) return; - dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); + dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch); } static const struct rfkill_ops dell_rfkill_ops = { @@ -629,13 +653,24 @@ static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { + int hwswitch_state; + int hwswitch_ret; int status; int ret; get_buffer(); + dell_send_request(buffer, 17, 11); ret = buffer->output[0]; status = buffer->output[1]; + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + hwswitch_ret = buffer->output[0]; + hwswitch_state = buffer->output[1]; + release_buffer(); seq_printf(s, "return:\t%d\n", ret); @@ -680,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "Bit 21: WiGig is blocked: %lu\n", (status & BIT(21)) >> 21); - seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); + seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret); + seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state); seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", hwswitch_state & BIT(0)); seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", @@ -716,6 +752,7 @@ static const struct file_operations dell_debugfs_fops = { static void dell_update_rfkill(struct work_struct *ignored) { + int hwswitch = 0; int status; int ret; @@ -728,16 +765,26 @@ static void dell_update_rfkill(struct work_struct *ignored) if (ret != 0) goto out; + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + + if (ret == 0 && (status & BIT(0))) + hwswitch = buffer->output[1]; + if (wifi_rfkill) { - dell_rfkill_update_hw_state(wifi_rfkill, 1, status); + dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch); dell_rfkill_update_sw_state(wifi_rfkill, 1, status); } if (bluetooth_rfkill) { - dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); + dell_rfkill_update_hw_state(bluetooth_rfkill, 2, 
status, + hwswitch); dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); } if (wwan_rfkill) { - dell_rfkill_update_hw_state(wwan_rfkill, 3, status); + dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch); dell_rfkill_update_sw_state(wwan_rfkill, 3, status); } @@ -805,25 +852,15 @@ static int __init dell_setup_rfkill(void) dell_send_request(buffer, 17, 11); ret = buffer->output[0]; status = buffer->output[1]; - clear_buffer(); - buffer->input[0] = 0x2; - dell_send_request(buffer, 17, 11); - hwswitch_state = buffer->output[1]; release_buffer(); /* dell wireless info smbios call is not supported */ if (ret != 0) return 0; - if (!(status & BIT(0))) { - if (force_rfkill) { - /* No hwsitch, clear all hw-controlled bits */ - hwswitch_state &= ~7; - } else { - /* rfkill is only tested on laptops with a hwswitch */ - return 0; - } - } + /* rfkill is only tested on laptops with a hwswitch */ + if (!(status & BIT(0)) && !force_rfkill) + return 0; if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, -- cgit v0.10.2 From 7b2a4635b84b4dbb07c93201a8c0aea82ed65e4f Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 30 Jun 2015 10:58:44 +0800 Subject: clk: mediatek: mt8173: Fix enabling of critical clocks On the MT8173 the clocks are provided by different units. To enable the critical clocks we must be sure that all parent clocks are already registered, otherwise the parents of the critical clocks end up being unused and get disabled later. To find a place where all parents are registered we try each time after we've registered some clocks if all known providers are present now and only then we enable the critical clocks Signed-off-by: Sascha Hauer Signed-off-by: James Liao [sboyd@codeaurora.org: Marked function and data __init] Signed-off-by: Stephen Boyd diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c index 4b9e04c..8b6523d 100644 --- a/drivers/clk/mediatek/clk-mt8173.c +++ b/drivers/clk/mediatek/clk-mt8173.c @@ -700,6 +700,22 @@ static const struct mtk_composite peri_clks[] __initconst = { MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1), }; +static struct clk_onecell_data *mt8173_top_clk_data __initdata; +static struct clk_onecell_data *mt8173_pll_clk_data __initdata; + +static void __init mtk_clk_enable_critical(void) +{ + if (!mt8173_top_clk_data || !mt8173_pll_clk_data) + return; + + clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]); + clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]); + clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]); + clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]); + clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]); + clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]); +} + static void __init mtk_topckgen_init(struct device_node *node) { struct clk_onecell_data *clk_data; @@ -712,19 +728,19 @@ static void __init mtk_topckgen_init(struct device_node *node) return; } - clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data); mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8173_clk_lock, clk_data); - clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 
if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); + + mtk_clk_enable_critical(); } CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init); @@ -818,13 +834,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node) { struct clk_onecell_data *clk_data; - clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); if (!clk_data) return; mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]); + mtk_clk_enable_critical(); } CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys", mtk_apmixedsys_init); -- cgit v0.10.2 From 93af5e93544328285a6f65f7d47bbea8979b28fb Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 26 Jun 2015 11:14:14 +0200 Subject: PM / Domains: Avoid infinite loops in attach/detach code If pm_genpd_{add,remove}_device() keeps on failing with -EAGAIN, we end up with an infinite loop in genpd_dev_pm_{at,de}tach(). This may happen due to a genpd.prepared_count imbalance. This is a bug elsewhere, but it will result in a system lock up, possibly during reboot of an otherwise functioning system. To avoid this, put a limit on the maximum number of loop iterations, using an exponential back-off mechanism. If the limit is reached, the operation will just fail. An error message is already printed. Signed-off-by: Geert Uytterhoeven Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index cdd547b..0ee43c1 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,6 +6,7 @@ * This file is released under the GPLv2. */ +#include #include #include #include @@ -19,6 +20,8 @@ #include #include +#define GENPD_RETRY_MAX_MS 250 /* Approximate */ + #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ ({ \ type (*__routine)(struct device *__d); \ @@ -2131,6 +2134,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); static void genpd_dev_pm_detach(struct device *dev, bool power_off) { struct generic_pm_domain *pd; + unsigned int i; int ret = 0; pd = pm_genpd_lookup_dev(dev); @@ -2139,10 +2143,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off) dev_dbg(dev, "removing from PM domain %s\n", pd->name); - while (1) { + for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { ret = pm_genpd_remove_device(pd, dev); if (ret != -EAGAIN) break; + + mdelay(i); cond_resched(); } @@ -2183,6 +2189,7 @@ int genpd_dev_pm_attach(struct device *dev) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; + unsigned int i; int ret; if (!dev->of_node) @@ -2218,10 +2225,12 @@ int genpd_dev_pm_attach(struct device *dev) dev_dbg(dev, "adding to PM domain %s\n", pd->name); - while (1) { + for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { ret = pm_genpd_add_device(pd, dev); if (ret != -EAGAIN) break; + + mdelay(i); cond_resched(); } -- cgit v0.10.2 From b51f9b103f58db2c5c0a20d6cee26c8bc255d3ae Mon Sep 17 00:00:00 2001 From: Uwe Geuder Date: Mon, 29 Jun 2015 23:35:05 +0300 Subject: PM / hibernate: clarify resume documentation it was not the whole truth that kernel mode cannot be used with swap on LVM Signed-off-by: Uwe Geuder Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt index f732a83..8cc17ca 100644 --- a/Documentation/power/swsusp.txt +++ b/Documentation/power/swsusp.txt @@ -410,8 +410,17 @@ Documentation/usb/persist.txt. 
Q: Can I suspend-to-disk using a swap partition under LVM? -A: No. You can suspend successfully, but you'll not be able to -resume. uswsusp should be able to work with LVM. See suspend.sf.net. +A: Yes and No. You can suspend successfully, but the kernel will not be able +to resume on its own. You need an initramfs that can recognize the resume +situation, activate the logical volume containing the swap volume (but not +touch any filesystems!), and eventually call + +echo -n "$major:$minor" > /sys/power/resume + +where $major and $minor are the respective major and minor device numbers of +the swap volume. + +uswsusp works with LVM, too. See http://suspend.sourceforge.net/ Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were compiled with the similar configuration files. Anyway I found that -- cgit v0.10.2 From 26095a01d359827eeccec5459c28ddd976183179 Mon Sep 17 00:00:00 2001 From: "Suthikulpanit, Suravee" Date: Tue, 7 Jul 2015 01:55:20 +0200 Subject: ACPI / scan: Add support for ACPI _CLS device matching Device drivers typically use ACPI _HIDs/_CIDs listed in struct device_driver acpi_match_table to match devices. However, for generic drivers, we do not want to list _HID for all supported devices. Also, certain classes of devices do not have _CID (e.g. SATA, USB). Instead, we can leverage ACPI _CLS, which specifies PCI-defined class code (i.e. base-class, subclass and programming interface). This patch adds support for matching ACPI devices using the _CLS method. To support loadable module, current design uses _HID or _CID to match device's modalias. With the new way of matching with _CLS this would requires modification to the current ACPI modalias key to include _CLS. This patch appends PCI-defined class-code to the existing ACPI modalias as following. acpi::::..::: E.g: # cat /sys/devices/platform/AMDI0600:00/modalias acpi:AMDI0600:010601: where bb is th base-class code, ss is te sub-class code, and pp is the programming interface code Since there would not be _HID/_CID in the ACPI matching table of the driver, this patch adds a field to acpi_device_id to specify the matching _CLS. static const struct acpi_device_id ahci_acpi_match[] = { { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) }, {}, }; In this case, the corresponded entry in modules.alias file would be: alias acpi*:010601:* ahci_platform Acked-by: Mika Westerberg Reviewed-by: Hanjun Guo Signed-off-by: Suravee Suthikulpanit Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 2649a06..ec25635 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1019,6 +1019,29 @@ static bool acpi_of_match_device(struct acpi_device *adev, return false; } +static bool __acpi_match_device_cls(const struct acpi_device_id *id, + struct acpi_hardware_id *hwid) +{ + int i, msk, byte_shift; + char buf[3]; + + if (!id->cls) + return false; + + /* Apply class-code bitmask, before checking each class-code byte */ + for (i = 1; i <= 3; i++) { + byte_shift = 8 * (3 - i); + msk = (id->cls_msk >> byte_shift) & 0xFF; + if (!msk) + continue; + + sprintf(buf, "%02x", (id->cls >> byte_shift) & msk); + if (strncmp(buf, &hwid->id[(i - 1) * 2], 2)) + return false; + } + return true; +} + static const struct acpi_device_id *__acpi_match_device( struct acpi_device *device, const struct acpi_device_id *ids, @@ -1036,9 +1059,12 @@ static const struct acpi_device_id *__acpi_match_device( list_for_each_entry(hwid, &device->pnp.ids, list) { /* First, check the ACPI/PNP IDs provided by the caller. 
*/ - for (id = ids; id->id[0]; id++) - if (!strcmp((char *) id->id, hwid->id)) + for (id = ids; id->id[0] || id->cls; id++) { + if (id->id[0] && !strcmp((char *) id->id, hwid->id)) return id; + else if (id->cls && __acpi_match_device_cls(id, hwid)) + return id; + } /* * Next, check ACPI_DT_NAMESPACE_HID and try to match the @@ -2101,6 +2127,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, if (info->valid & ACPI_VALID_UID) pnp->unique_id = kstrdup(info->unique_id.string, GFP_KERNEL); + if (info->valid & ACPI_VALID_CLS) + acpi_add_id(pnp, info->class_code.string); kfree(info); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c471dfc..bdfdb9f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -58,6 +58,19 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +/** + * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with + * the PCI-defined class-code information + * + * @_cls : the class, subclass, prog-if triple for this device + * @_msk : the class mask for this device + * + * This macro is used to create a struct acpi_device_id that matches a + * specific PCI class. The .id and .driver_data fields will be left + * initialized with the default value. + */ +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk), + static inline bool has_acpi_companion(struct device *dev) { return is_acpi_node(dev->fwnode); @@ -446,6 +459,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), struct fwnode_handle; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8183d66..34f25b7 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -189,6 +189,8 @@ struct css_device_id { struct acpi_device_id { __u8 id[ACPI_ID_LEN]; kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; }; #define PNP_ID_LEN 8 diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index eff7de1..e70fcd1 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -63,6 +63,8 @@ int main(void) DEVID(acpi_device_id); DEVID_FIELD(acpi_device_id, id); + DEVID_FIELD(acpi_device_id, cls); + DEVID_FIELD(acpi_device_id, cls_msk); DEVID(pnp_device_id); DEVID_FIELD(pnp_device_id, id); diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 84c86f3..5f20882 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -523,12 +523,40 @@ static int do_serio_entry(const char *filename, } ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry); -/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ +/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or + * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if) + * + * NOTE: Each driver should use one of the following : _HID, _CIDs + * or _CLS. Also, bb, ss, and pp can be substituted with ?? + * as don't care byte. 
+ */ static int do_acpi_entry(const char *filename, void *symval, char *alias) { DEF_FIELD_ADDR(symval, acpi_device_id, id); - sprintf(alias, "acpi*:%s:*", *id); + DEF_FIELD_ADDR(symval, acpi_device_id, cls); + DEF_FIELD_ADDR(symval, acpi_device_id, cls_msk); + + if (id && strlen((const char *)*id)) + sprintf(alias, "acpi*:%s:*", *id); + else if (cls) { + int i, byte_shift, cnt = 0; + unsigned int msk; + + sprintf(&alias[cnt], "acpi*:"); + cnt = 6; + for (i = 1; i <= 3; i++) { + byte_shift = 8 * (3-i); + msk = (*cls_msk >> byte_shift) & 0xFF; + if (msk) + sprintf(&alias[cnt], "%02x", + (*cls >> byte_shift) & 0xFF); + else + sprintf(&alias[cnt], "??"); + cnt += 2; + } + sprintf(&alias[cnt], ":*"); + } return 1; } ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry); -- cgit v0.10.2 From 2051e9248688e4c36e4de2e93b88fa0d74e86261 Mon Sep 17 00:00:00 2001 From: "Suthikulpanit, Suravee" Date: Tue, 7 Jul 2015 01:55:21 +0200 Subject: ata: ahci_platform: Add ACPI _CLS matching This patch adds ACPI supports for AHCI platform driver, which uses _CLS method to match the device. The following is an example of ASL structure in DSDT for a SATA controller, which contains _CLS package to be matched by the ahci_platform driver: Device (AHC0) // AHCI Controller { Name(_HID, "AMDI0600") Name (_CCA, 1) Name (_CLS, Package (3) { 0x01, // Base Class: Mass Storage 0x06, // Sub-Class: serial ATA 0x01, // Interface: AHCI }) Name (_CRS, ResourceTemplate () { Memory32Fixed (ReadWrite, 0xE0300000, 0x00010000) Interrupt (ResourceConsumer, Level, ActiveHigh, Exclusive,,,) { 387 } }) } Also, since ATA driver should not require PCI support for ATA_ACPI, this patch removes dependency in the driver/ata/Kconfig. Acked-by: Tejun Heo Acked-by: Mika Westerberg Reviewed-by: Hanjun Guo Signed-off-by: Suravee Suthikulpanit Signed-off-by: Rafael J. Wysocki diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 6d17a3b..15e40ee 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR config ATA_ACPI bool "ATA ACPI Support" - depends on ACPI && PCI + depends on ACPI default y help This option adds support for ATA-related ACPI objects. diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 614c78f..1befb11 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include "ahci.h" #define DRV_NAME "ahci" @@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = { }; MODULE_DEVICE_TABLE(of, ahci_of_match); +static const struct acpi_device_id ahci_acpi_match[] = { + { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, ahci_acpi_match); + static struct platform_driver ahci_driver = { .probe = ahci_probe, .remove = ata_platform_remove_one, .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, + .acpi_match_table = ahci_acpi_match, .pm = &ahci_pm_ops, }, }; -- cgit v0.10.2 From b32aadc1a8ed84afbe924cd2ced31cd6a2e67074 Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Tue, 7 Jul 2015 01:39:23 +0530 Subject: powerpc/powernv: Fix race in updating core_idle_state core_idle_state is maintained for each core. It uses 0-7 bits to track whether a thread in the core has entered fastsleep or winkle. 8th bit is used as a lock bit. The lock bit is set in these 2 scenarios- - The thread is first in subcore to wakeup from sleep/winkle. 
- If its the last thread in the core about to enter sleep/winkle While the lock bit is set, if any other thread in the core wakes up, it loops until the lock bit is cleared before proceeding in the wakeup path. This helps prevent race conditions w.r.t fastsleep workaround and prevents threads from switching to process context before core/subcore resources are restored. But, in the path to sleep/winkle entry, we currently don't check for lock-bit. This exposes us to following race when running with subcore on- First thread in the subcorea Another thread in the same waking up core entering sleep/winkle lwarx r15,0,r14 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT stwcx. r15,0,r14 [Code to restore subcore state] lwarx r15,0,r14 [clear thread bit] stwcx. r15,0,r14 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS stw r15,0(r14) Here, after the thread entering sleep clears its thread bit in core_idle_state, the value is overwritten by the thread waking up. In such cases when the core enters fastsleep, code mistakes an idle thread as running. Because of this, the first thread waking up from fastsleep which is supposed to resync timebase skips it. So we can end up having a core with stale timebase value. This patch fixes the above race by looping on the lock bit even while entering the idle states. Signed-off-by: Shreyas B. Prabhu Fixes: 7b54e9f213f76 'powernv/powerpc: Add winkle support for offline cpus' Cc: stable@vger.kernel.org # 3.19+ Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index ccde8f0..112ccf4 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -52,6 +52,22 @@ .text /* + * Used by threads when the lock bit of core_idle_state is set. + * Threads will spin in HMT_LOW until the lock bit is cleared. + * r14 - pointer to core_idle_state + * r15 - used to load contents of core_idle_state + */ + +core_idle_lock_held: + HMT_LOW +3: lwz r15,0(r14) + andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT + bne 3b + HMT_MEDIUM + lwarx r15,0,r14 + blr + +/* * Pass requested state in r3: * r3 - PNV_THREAD_NAP/SLEEP/WINKLE * @@ -150,6 +166,10 @@ power7_enter_nap_mode: ld r14,PACA_CORE_IDLE_STATE_PTR(r13) lwarx_loop1: lwarx r15,0,r14 + + andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT + bnel core_idle_lock_held + andc r15,r15,r7 /* Clear thread bit */ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS @@ -294,7 +314,7 @@ lwarx_loop2: * workaround undo code or resyncing timebase or restoring context * In either case loop until the lock bit is cleared. */ - bne core_idle_lock_held + bnel core_idle_lock_held cmpwi cr2,r15,0 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13) @@ -319,15 +339,6 @@ lwarx_loop2: isync b common_exit -core_idle_lock_held: - HMT_LOW -core_idle_lock_loop: - lwz r15,0(14) - andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT - bne core_idle_lock_loop - HMT_MEDIUM - b lwarx_loop2 - first_thread_in_subcore: /* First thread in subcore to wakeup */ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT -- cgit v0.10.2 From cbe4f4434ded7f3e581b9f7db34a875d0246210e Mon Sep 17 00:00:00 2001 From: James Simmons Date: Thu, 25 Jun 2015 16:59:46 -0400 Subject: staging:lustre: remove irq.h from socklnd.h The header socklnd.h includes irq.h which is not need and doesn't exist in the OpenSFS lustre branch. Having irq.h in socklnd.h does break the build on the m68k platform. So we can safely remove it. 
Signed-off-by: James Simmons Tested-by: Guenter Roeck Tested-by: by: Geert Uytterhoeven Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index 7125eb9..8a9d4a0 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -31,7 +31,6 @@ #define DEBUG_PORTAL_ALLOC #define DEBUG_SUBSYSTEM S_LND -#include #include #include #include -- cgit v0.10.2 From f9c87a6f46d508eae0d9ae640be98d50f237f827 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 6 Jul 2015 17:58:19 +0200 Subject: s390/sclp: clear upper register halves in _sclp_print_early If the kernel is compiled with gcc 5.1 and the XZ compression option the decompress_kernel function calls _sclp_print_early in 64-bit mode while the content of the upper register half of %r6 is non-zero. This causes a specification exception on the servc instruction in _sclp_servc. The _sclp_print_early function saves and restores the upper registers halves but it fails to clear them for the 31-bit code of the mini sclp driver. Cc: Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 43c3169..ada0c07 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -270,6 +270,8 @@ ENTRY(_sclp_print_early) jno .Lesa2 ahi %r15,-80 stmh %r6,%r15,96(%r15) # store upper register halves + basr %r13,0 + lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves .Lesa2: lr %r10,%r2 # save string pointer lhi %r2,0 @@ -291,6 +293,8 @@ ENTRY(_sclp_print_early) .Lesa3: lm %r6,%r15,120(%r15) # restore registers br %r14 +.Lzeroes: + .fill 64,4,0 .LwritedataS4: .long 0x00760005 # SCLP command for write data -- cgit v0.10.2 From 747d34e7313034768aac83d679db43cedc5ab778 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Sat, 27 Jun 2015 23:11:44 +0200 Subject: clocksource/imx: Define clocksource for mx27 The rework of the imx clocksource driver missed to add an entry for imx27 which results in a boot failure on those machines. Add the proper CLOCKSOURCE_OF_DECLARE() entry for imx27 and map it to the imx21 init. 
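For reference, CLOCKSOURCE_OF_DECLARE() binds a devicetree compatible string to a boot-time init callback, so the whole fix is the single declaration sketched here (the same line appears in the hunk below):

/*
 * Sketch of the one-line fix: a GPT node with compatible = "fsl,imx27-gpt"
 * is now routed to imx21_timer_init_dt, which already handles this timer
 * variant.
 */
CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
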
Fixes: bef11c881ba5 'ARM: imx: initialize gpt device type for DT boot' Signed-off-by: Philippe Reynes Cc: linux-arm-kernel@lists.infradead.org Cc: daniel.lezcano@linaro.org Cc: fabio.estevam@freescale.com Cc: shawnguo@kernel.org Cc: kernel@pengutronix.de Link: http://lkml.kernel.org/r/1435439504-406-1-git-send-email-tremyfr@gmail.com Signed-off-by: Thomas Gleixner diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c index 879c784..2d59038 100644 --- a/drivers/clocksource/timer-imx-gpt.c +++ b/drivers/clocksource/timer-imx-gpt.c @@ -529,6 +529,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np) CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt); +CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt); CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt); CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt); CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt); -- cgit v0.10.2 From 7c4a976cd55972b68c75a978f171b6db5df4ce66 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 7 Jul 2015 10:14:35 +0200 Subject: clockevents: Allow set-state callbacks to be optional Its mandatory for the drivers to provide set_state_{oneshot|periodic}() (only if related modes are supported) and set_state_shutdown() callbacks today, if they are implementing the new set-state interface. But this leads to unnecessary noop callbacks for drivers which don't want to implement them. Over that, it will lead to a full function call for nothing really useful. Lets make all set-state callbacks optional. Suggested-by: Daniel Lezcano Signed-off-by: Viresh Kumar Signed-off-by: Daniel Lezcano Link: http://lkml.kernel.org/r/1436256875-15562-1-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Thomas Gleixner diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 08ccc3d..50eb107 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev, /* The clockevent device is getting replaced. Shut it down. 
*/ case CLOCK_EVT_STATE_SHUTDOWN: - return dev->set_state_shutdown(dev); + if (dev->set_state_shutdown) + return dev->set_state_shutdown(dev); + return 0; case CLOCK_EVT_STATE_PERIODIC: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) return -ENOSYS; - return dev->set_state_periodic(dev); + if (dev->set_state_periodic) + return dev->set_state_periodic(dev); + return 0; case CLOCK_EVT_STATE_ONESHOT: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return -ENOSYS; - return dev->set_state_oneshot(dev); + if (dev->set_state_oneshot) + return dev->set_state_oneshot(dev); + return 0; case CLOCK_EVT_STATE_ONESHOT_STOPPED: /* Core internal bug */ @@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev) if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; - /* New state-specific callbacks */ - if (!dev->set_state_shutdown) - return -EINVAL; - - if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && - !dev->set_state_periodic) - return -EINVAL; - - if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) && - !dev->set_state_oneshot) - return -EINVAL; - return 0; } -- cgit v0.10.2 From 3f8dc44d88d3e86178eb9322c779c599f3745b21 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 7 Jul 2015 11:01:17 +1000 Subject: cxl: Fix refcounting in kernel API Currently the kernel API AFU dev refcounting is done on context start and stop. This patch moves this refcounting to context init and release, bringing it inline with how the userspace API does it. Without this we've seen the refcounting on the AFU get out of whack between the user and kernel API usage. This causes the AFU structures to be freed when they are actually still in use. This fixes some kref warnings we've been seeing and spurious ErrIVTE IRQs. Signed-off-by: Michael Neuling Acked-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 0c77240..729e085 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) afu = cxl_pci_to_afu(dev); + get_device(&afu->dev); ctx = cxl_context_alloc(); if (IS_ERR(ctx)) return ctx; @@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) rc = cxl_context_init(ctx, afu, false, NULL); if (rc) { kfree(ctx); + put_device(&afu->dev); return ERR_PTR(-ENOMEM); } cxl_assign_psn_space(ctx); @@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx) if (ctx->status != CLOSED) return -EBUSY; + put_device(&ctx->afu->dev); + cxl_context_free(ctx); return 0; @@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, } ctx->status = STARTED; - get_device(&ctx->afu->dev); out: mutex_unlock(&ctx->status_mutex); return rc; @@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element); /* Stop a context. Returns 0 on success, otherwise -Errno */ int cxl_stop_context(struct cxl_context *ctx) { - int rc; - - rc = __detach_context(ctx); - if (!rc) - put_device(&ctx->afu->dev); - return rc; + return __detach_context(ctx); } EXPORT_SYMBOL_GPL(cxl_stop_context); -- cgit v0.10.2 From 5a3f75e3f02836518ce49536e9c460ca8e1fa290 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 5 Jul 2015 17:12:32 +0000 Subject: x86/irq: Plug irq vector hotplug race Jin debugged a nasty cpu hotplug race which results in leaking a irq vector on the newly hotplugged cpu. 
cpu N cpu M native_cpu_up device_shutdown do_boot_cpu free_msi_irqs start_secondary arch_teardown_msi_irqs smp_callin default_teardown_msi_irqs setup_vector_irq arch_teardown_msi_irq __setup_vector_irq native_teardown_msi_irq lock(vector_lock) destroy_irq install vectors unlock(vector_lock) lock(vector_lock) ---> __clear_irq_vector unlock(vector_lock) lock(vector_lock) set_cpu_online unlock(vector_lock) This leaves the irq vector(s) which are torn down on CPU M stale in the vector array of CPU N, because CPU M does not see CPU N online yet. There is a similar issue with concurrent newly setup interrupts. The alloc/free protection of irq descriptors does not prevent the above race, because it merily prevents interrupt descriptors from going away or changing concurrently. Prevent this by moving the call to setup_vector_irq() into the vector_lock held region which protects set_cpu_online(): cpu N cpu M native_cpu_up device_shutdown do_boot_cpu free_msi_irqs start_secondary arch_teardown_msi_irqs smp_callin default_teardown_msi_irqs lock(vector_lock) arch_teardown_msi_irq setup_vector_irq() __setup_vector_irq native_teardown_msi_irq install vectors destroy_irq set_cpu_online unlock(vector_lock) lock(vector_lock) __clear_irq_vector unlock(vector_lock) So cpu M either sees the cpu N online before clearing the vector or cpu N installs the vectors after cpu M has cleared it. Reported-by: xiao jin Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Joerg Roedel Cc: Borislav Petkov Cc: Yanmin Zhang Link: http://lkml.kernel.org/r/20150705171102.141898931@linutronix.de diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 28eba2d..f813261 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu) int irq, vector; struct apic_chip_data *data; - /* - * vector_lock will make sure that we don't run into irq vector - * assignments that might be happening on another cpu in parallel, - * while we setup our initial vector to irq mappings. - */ - raw_spin_lock(&vector_lock); /* Mark the inuse vectors */ for_each_active_irq(irq) { data = apic_chip_data(irq_get_irq_data(irq)); @@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu) if (!cpumask_test_cpu(cpu, data->domain)) per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; } - raw_spin_unlock(&vector_lock); } /* - * Setup the vector to irq mappings. + * Setup the vector to irq mappings. Must be called with vector_lock held. */ void setup_vector_irq(int cpu) { int irq; + lockdep_assert_held(&vector_lock); /* * On most of the platforms, legacy PIC delivers the interrupts on the * boot cpu. But there are certain platforms where PIC interrupts are diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0bd8c1d..d3010aa 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -171,11 +171,6 @@ static void smp_callin(void) apic_ap_setup(); /* - * Need to setup vector mappings before we enable interrupts. - */ - setup_vector_irq(smp_processor_id()); - - /* * Save our processor parameters. Note: this information * is needed for clock calibration. */ @@ -239,11 +234,13 @@ static void notrace start_secondary(void *unused) check_tsc_sync_target(); /* - * We need to hold vector_lock so there the set of online cpus - * does not change while we are assigning vectors to cpus. Holding - * this lock ensures we don't half assign or remove an irq from a cpu. 
+ * Lock vector_lock and initialize the vectors on this cpu + * before setting the cpu online. We must set it online with + * vector_lock held to prevent a concurrent setup/teardown + * from seeing a half valid vector space. */ lock_vector_lock(); + setup_vector_irq(smp_processor_id()); set_cpu_online(smp_processor_id(), true); unlock_vector_lock(); cpu_set_state_online(smp_processor_id()); -- cgit v0.10.2 From cbb24dc761d95fe39a7a122bb1b298e9604cae15 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 5 Jul 2015 17:12:33 +0000 Subject: x86/irq: Use proper locking in check_irq_vectors_for_cpu_disable() It's unsafe to examine fields in the irq descriptor w/o holding the descriptor lock. Add proper locking. While at it add a comment why the vector check can run lock less Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: xiao jin Cc: Joerg Roedel Cc: Borislav Petkov Cc: Yanmin Zhang Link: http://lkml.kernel.org/r/20150705171102.236544164@linutronix.de diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 88b36648..85ca76e 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void) if (!desc) continue; + /* + * Protect against concurrent action removal, + * affinity changes etc. + */ + raw_spin_lock(&desc->lock); data = irq_desc_get_irq_data(desc); cpumask_copy(&affinity_new, data->affinity); cpumask_clear_cpu(this_cpu, &affinity_new); /* Do not count inactive or per-cpu irqs. */ - if (!irq_has_action(irq) || irqd_is_per_cpu(data)) + if (!irq_has_action(irq) || irqd_is_per_cpu(data)) { + raw_spin_unlock(&desc->lock); continue; + } + raw_spin_unlock(&desc->lock); /* * A single irq may be mapped to multiple * cpu's vector_irq[] (for example IOAPIC cluster @@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void) * vector. If the vector is marked in the used vectors * bitmap or an irq is assigned to it, we don't count * it as available. + * + * As this is an inaccurate snapshot anyway, we can do + * this w/o holding vector_lock. */ for (vector = FIRST_EXTERNAL_VECTOR; vector < first_system_vector; vector++) { -- cgit v0.10.2 From 09cf92b784fae6109450c5d64f9908066d605249 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 5 Jul 2015 17:12:35 +0000 Subject: x86/irq: Retrieve irq data after locking irq_desc irq_data is protected by irq_desc->lock, so retrieving the irq chip from irq_data outside the lock is racy vs. an concurrent update. Move it into the lock held region. While at it add a comment why the vector walk does not require vector_lock. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: xiao jin Cc: Joerg Roedel Cc: Borislav Petkov Cc: Yanmin Zhang Link: http://lkml.kernel.org/r/20150705171102.331320612@linutronix.de diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 85ca76e..c7dfe1b 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -497,6 +497,11 @@ void fixup_irqs(void) */ mdelay(1); + /* + * We can walk the vector array of this cpu without holding + * vector_lock because the cpu is already marked !online, so + * nothing else will touch it. 
+ */ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irr; @@ -508,9 +513,9 @@ void fixup_irqs(void) irq = __this_cpu_read(vector_irq[vector]); desc = irq_to_desc(irq); + raw_spin_lock(&desc->lock); data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); - raw_spin_lock(&desc->lock); if (chip->irq_retrigger) { chip->irq_retrigger(data); __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); -- cgit v0.10.2 From 6d3dab7d84177f836b14961b4d252d0959d66768 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Jul 2015 13:08:39 +0200 Subject: PM / wakeirq: Avoid setting power.wakeirq too hastily If dev_pm_attach_wake_irq() fails, the device's power.wakeirq field should not be set to point to the struct wake_irq passed to that function, as that object will be freed going forward. For this reason, make dev_pm_attach_wake_irq() first call device_wakeup_attach_irq() and only set the device's power.wakeirq field if that's successful. That requires device_wakeup_attach_irq() to be called under the device's power.lock lock, but since dev_pm_attach_wake_irq() is the only caller of it, the requisite changes are easy to make. Fixes: 4990d4fe327b (PM / Wakeirq: Add automated device wake IRQ handling) Reported-by: Felipe Balbi Tested-by: Tony Lindgren Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 7470004..eb6e674 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq, return -EEXIST; } - dev->power.wakeirq = wirq; - spin_unlock_irqrestore(&dev->power.lock, flags); - err = device_wakeup_attach_irq(dev, wirq); - if (err) - return err; + if (!err) + dev->power.wakeirq = wirq; - return 0; + spin_unlock_irqrestore(&dev->power.lock, flags); + return err; } /** @@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev) return; spin_lock_irqsave(&dev->power.lock, flags); + device_wakeup_detach_irq(dev); dev->power.wakeirq = NULL; spin_unlock_irqrestore(&dev->power.lock, flags); - device_wakeup_detach_irq(dev); if (wirq->dedicated_irq) free_irq(wirq->irq, wirq); kfree(wirq); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 7332ebc..15d27d7 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -247,32 +247,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable); * Attach a device wakeirq to the wakeup source so the device * wake IRQ can be configured automatically for suspend and * resume. + * + * Call under the device's power.lock lock. */ int device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq) { struct wakeup_source *ws; - int ret = 0; - spin_lock_irq(&dev->power.lock); ws = dev->power.wakeup; if (!ws) { dev_err(dev, "forgot to call call device_init_wakeup?\n"); - ret = -EINVAL; - goto unlock; + return -EINVAL; } - if (ws->wakeirq) { - ret = -EEXIST; - goto unlock; - } + if (ws->wakeirq) + return -EEXIST; ws->wakeirq = wakeirq; - -unlock: - spin_unlock_irq(&dev->power.lock); - - return ret; + return 0; } /** @@ -280,20 +273,16 @@ unlock: * @dev: Device to handle * * Removes a device wakeirq from the wakeup source. + * + * Call under the device's power.lock lock. 
*/ void device_wakeup_detach_irq(struct device *dev) { struct wakeup_source *ws; - spin_lock_irq(&dev->power.lock); ws = dev->power.wakeup; - if (!ws) - goto unlock; - - ws->wakeirq = NULL; - -unlock: - spin_unlock_irq(&dev->power.lock); + if (ws) + ws->wakeirq = NULL; } /** -- cgit v0.10.2 From 41315b793e13f884cda79389f0d5d44d027e57d1 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Tue, 7 Jul 2015 17:03:36 +0800 Subject: drm/rockchip: use drm_gem_mmap helpers Rather than (incompletely [0]) re-implementing drm_gem_mmap() and drm_gem_mmap_obj() helpers, call them directly from the rockchip mmap routines. Once the core functions return successfully, the rockchip mmap routines can still use dma_mmap_attrs() to simply mmap the entire buffer. [0] Previously, we were performing the mmap() without first taking a reference on the underlying gem buffer. This could leak ptes if the gem object is destroyed while userspace is still holding the mapping. Signed-off-by: Daniel Kurtz Reviewed-by: Daniel Vetter Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index eb2282c..eba5f8a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) &rk_obj->dma_attrs); } -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) + { + int ret; struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; - unsigned long vm_size; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > obj->size) - return -EINVAL; + /* + * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 
+ */ + vma->vm_flags &= ~VM_PFNMAP; - return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); + if (ret) + drm_gem_vm_close(vma); + + return ret; } -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +int rockchip_gem_mmap_buf(struct drm_gem_object *obj, + struct vm_area_struct *vma) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_gem_object *obj; - struct drm_vma_offset_node *node; + struct drm_device *drm = obj->dev; int ret; - if (drm_device_is_unplugged(dev)) - return -ENODEV; + mutex_lock(&drm->struct_mutex); + ret = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&drm->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); + return rockchip_drm_gem_object_mmap(obj, vma); +} - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - if (!node) { - mutex_unlock(&dev->struct_mutex); - DRM_ERROR("failed to find vma node.\n"); - return -EINVAL; - } else if (!drm_vma_node_is_allowed(node, filp)) { - mutex_unlock(&dev->struct_mutex); - return -EACCES; - } +/* drm driver mmap file operations */ +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *obj; + int ret; - obj = container_of(node, struct drm_gem_object, vma_node); - ret = rockchip_gem_mmap_buf(obj, vma); + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; - mutex_unlock(&dev->struct_mutex); + obj = vma->vm_private_data; - return ret; + return rockchip_drm_gem_object_mmap(obj, vma); } struct rockchip_gem_object * -- cgit v0.10.2 From 01447e9f04ba1c49a9534ae6a5a6f26c2bb05226 Mon Sep 17 00:00:00 2001 From: Zhao Junwang Date: Tue, 7 Jul 2015 17:08:35 +0800 Subject: drm: add a check for x/y in drm_mode_setcrtc The legacy setcrtc ioctl does take a 32 bit value which might indeed overflow. The checks of crtc_req->x > INT_MAX and crtc_req->y > INT_MAX aren't needed any more with this. v2: -polish the annotation according to Daniel's comment Cc: Daniel Vetter Signed-off-by: Zhao Junwang Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b9ba061..357bd04 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - /* For some reason crtc x/y offsets are signed internally. */ - if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) + /* + * Universal plane src offsets are only 16.16, prevent havoc for + * drivers using universal plane code internally. + */ + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) return -ERANGE; drm_modeset_lock_all(dev); -- cgit v0.10.2 From bb8bd38b9a1685334b73e8c62e128cbedb875867 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 1 Jul 2015 12:57:40 -0400 Subject: bio integrity: do not assume bio_integrity_pool exists if bioset exists bio_integrity_alloc() and bio_integrity_free() assume that if a bio was allocated from a bioset, then that bioset also had its bio_integrity_pool allocated using bioset_integrity_create(). This is a very bad assumption given that bioset_create() and bioset_integrity_create() are completely disjoint.
Not all callers of bioset_create() have been trained to also call bioset_integrity_create() -- and they may not care to be. Fix this by falling back to kmalloc'ing 'struct bio_integrity_payload' rather than force all bioset consumers to (wastefully) preallocate a bio_integrity_pool that they very likely won't actually need (given the niche nature of the current block integrity support). Otherwise, a NULL pointer "Kernel BUG" with a trace like the following will be observed (as seen on s390x using zfcp storage) because dm-io doesn't use bioset_integrity_create() when creating its bioset: [ 791.643338] Call Trace: [ 791.643339] ([<00000003df98b848>] 0x3df98b848) [ 791.643341] [<00000000002c5de8>] bio_integrity_alloc+0x48/0xf8 [ 791.643348] [<00000000002c6486>] bio_integrity_prep+0xae/0x2f0 [ 791.643349] [<0000000000371e38>] blk_queue_bio+0x1c8/0x3d8 [ 791.643355] [<000000000036f8d0>] generic_make_request+0xc0/0x100 [ 791.643357] [<000000000036f9b2>] submit_bio+0xa2/0x198 [ 791.643406] [<000003ff801f9774>] dispatch_io+0x15c/0x3b0 [dm_mod] [ 791.643419] [<000003ff801f9b3e>] dm_io+0x176/0x2f0 [dm_mod] [ 791.643423] [<000003ff8074b28a>] do_reads+0x13a/0x1a8 [dm_mirror] [ 791.643425] [<000003ff8074b43a>] do_mirror+0x142/0x298 [dm_mirror] [ 791.643428] [<0000000000154fca>] process_one_work+0x18a/0x3f8 [ 791.643432] [<000000000015598a>] worker_thread+0x132/0x3b0 [ 791.643435] [<000000000015d49a>] kthread+0xd2/0xd8 [ 791.643438] [<00000000005bc0ca>] kernel_thread_starter+0x6/0xc [ 791.643446] [<00000000005bc0c4>] kernel_thread_starter+0x0/0xc Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 0436c21..719b715 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, unsigned long idx = BIO_POOL_NONE; unsigned inline_vecs; - if (!bs) { + if (!bs || !bs->bio_integrity_pool) { bip = kmalloc(sizeof(struct bio_integrity_payload) + sizeof(struct bio_vec) * nr_vecs, gfp_mask); inline_vecs = nr_vecs; @@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio) kfree(page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset); - if (bs) { + if (bs && bs->bio_integrity_pool) { if (bip->bip_slab != BIO_POOL_NONE) bvec_free(bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab); -- cgit v0.10.2 From 0762b23d23c1f23beab91a3af0fa89749b75f03c Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Tue, 7 Jul 2015 12:41:07 +0530 Subject: block: use FIELD_SIZEOF to calculate size of a field use FIELD_SIZEOF instead of open coding Signed-off-by: Maninder Singh Signed-off-by: Jens Axboe diff --git a/block/blk-core.c b/block/blk-core.c index 82819e6..627ed0c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume); int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * - sizeof(((struct request *)0)->cmd_flags)); + FIELD_SIZEOF(struct request, cmd_flags)); /* used for unplugging and affects IO latency/throughput - HIGHPRI */ kblockd_workqueue = alloc_workqueue("kblockd", -- cgit v0.10.2 From a322baad1003798312741b0cb97bd2c7511ccf61 Mon Sep 17 00:00:00 2001 From: Arianna Avanzini Date: Tue, 7 Jul 2015 03:08:15 +0200 Subject: block/blk-cgroup.c: free per-blkcg data when freeing the blkcg Currently, per-blkcg data is freed each time a policy is deactivated, that is also upon scheduler switch. 
However, when switching from a scheduler implementing a policy which requires per-blkcg data to another one, that same policy might be active on other devices, and therefore those same per-blkcg data could still be in use. This commit lets per-blkcg data be freed when the blkcg is freed instead of on policy deactivation. Signed-off-by: Arianna Avanzini Reported-and-tested-by: Michael Kaminsky Fixes: e48453c3 ("block, cgroup: implement policy-specific per-blkcg data") Signed-off-by: Jens Axboe diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 9f97da5..5e2723f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -822,8 +822,13 @@ static void blkcg_css_free(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); - if (blkcg != &blkcg_root) + if (blkcg != &blkcg_root) { + int i; + + for (i = 0; i < BLKCG_MAX_POLS; i++) + kfree(blkcg->pd[i]); kfree(blkcg); + } } static struct cgroup_subsys_state * @@ -1162,8 +1167,6 @@ void blkcg_deactivate_policy(struct request_queue *q, kfree(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; - kfree(blkg->blkcg->pd[pol->plid]); - blkg->blkcg->pd[pol->plid] = NULL; spin_unlock(&blkg->blkcg->lock); } -- cgit v0.10.2 From c96af077016bc2e0e36a107e31910395fc677f57 Mon Sep 17 00:00:00 2001 From: Walter Lozano Date: Fri, 26 Jun 2015 02:25:57 +0000 Subject: ARM: socfpga: dts: Fix adxl34x formatting and compatible string This patch fixes the formatting of the DTS bindings for the adxl34x digital accelerometer, and updates the compatible string after the deprecation of the "adxl345x" compatible string. Signed-off-by: Walter Lozano Acked-by: Steffen Trumtrar Signed-off-by: Dinh Nguyen diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts index 71468a7..1ae6082 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts @@ -73,14 +73,14 @@ status = "okay"; }; -&i2c1{ +&i2c1 { status = "okay"; - accel1: accel1@53{ - compatible = "adxl34x"; + accel1: accelerometer@53 { + compatible = "adi,adxl345"; reg = <0x53>; - interrupt-parent = < &portc >; + interrupt-parent = <&portc>; interrupts = <3 2>; }; }; -- cgit v0.10.2 From b6cfb277378ef831c0fa84bcff5049307294adc6 Mon Sep 17 00:00:00 2001 From: Al Stone Date: Mon, 6 Jul 2015 17:16:47 -0600 Subject: ACPI / ARM64: add BAD_MADT_GICC_ENTRY() macro The BAD_MADT_ENTRY() macro is designed to work for all of the subtables of the MADT. In the ACPI 5.1 version of the spec, the struct for the GICC subtable (struct acpi_madt_generic_interrupt) is 76 bytes long; in ACPI 6.0, the struct is 80 bytes long. But, there is only one definition in ACPICA for this struct -- and that is the 6.0 version. Hence, when BAD_MADT_ENTRY() compares the struct size to the length in the GICC subtable, it fails if 5.1 structs are in use, and there are systems in the wild that have them. This patch adds the BAD_MADT_GICC_ENTRY() macro that checks the GICC subtable only, accounting for the difference in specification versions that are possible. The BAD_MADT_ENTRY() will continue to work as is for all other MADT subtables. This code is being added to an arm64 header file since that is currently the only architecture using the GICC subtable of the MADT. As a GIC is specific to ARM, it is also unlikely the subtable will be used elsewhere. Fixes: aeb823bbacc2 ("ACPICA: ACPI 6.0: Add changes for FADT table.") Signed-off-by: Al Stone Acked-by: Will Deacon Acked-by: "Rafael J.
Wysocki" [catalin.marinas@arm.com: extra brackets around macro arguments] Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 39248d3..406485e 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -19,6 +19,14 @@ #include #include +/* Macros for consistency checks of the GICC subtable of MADT */ +#define ACPI_MADT_GICC_LENGTH \ + (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) + +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ + (entry)->header.length != ACPI_MADT_GICC_LENGTH) + /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI /* ACPI table mapping after acpi_gbl_permanent_mmap is set */ -- cgit v0.10.2 From 99e3e3ae332b6ca91d5c444ea7849f367f5e5a76 Mon Sep 17 00:00:00 2001 From: Al Stone Date: Mon, 6 Jul 2015 17:16:48 -0600 Subject: ACPI / ARM64 : use the new BAD_MADT_GICC_ENTRY macro For those parts of the arm64 ACPI code that need to check GICC subtables in the MADT, use the new BAD_MADT_GICC_ENTRY macro instead of the previous BAD_MADT_ENTRY. The new macro takes into account differences in the size of the GICC subtable that the old macro did not; this caused failures even though the subtable entries are valid. Fixes: aeb823bbacc2 ("ACPICA: ACPI 6.0: Add changes for FADT table.") Signed-off-by: Al Stone Reviewed-by: Hanjun Guo Acked-by: Will Deacon Acked-by: "Rafael J. Wysocki" Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 695801a..50fb469 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, struct acpi_madt_generic_interrupt *processor; processor = (struct acpi_madt_generic_interrupt *)header; - if (BAD_MADT_ENTRY(processor, end)) + if (BAD_MADT_GICC_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 8d7e1c8..4dd8826 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1055,7 +1055,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, processor = (struct acpi_madt_generic_interrupt *)header; - if (BAD_MADT_ENTRY(processor, end)) + if (BAD_MADT_GICC_ENTRY(processor, end)) return -EINVAL; /* -- cgit v0.10.2 From 93abc72bc7901cfc61a2871da7a565c9fdb26108 Mon Sep 17 00:00:00 2001 From: Walter Lozano Date: Fri, 26 Jun 2015 02:25:58 +0000 Subject: ARM: socfpga: dts: Fix entries order This patch reorders the nodes alphabetically Signed-off-by: Walter Lozano Signed-off-by: Dinh Nguyen diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts index 1ae6082..5e17fd1 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts @@ -60,15 +60,6 @@ rxc-skew-ps = <2000>; }; -&mmc0 { - vmmc-supply = <®ulator_3_3v>; - vqmmc-supply = <®ulator_3_3v>; -}; - -&usb1 { - status = "okay"; -}; - &gpio2 { status = "okay"; }; @@ -84,3 +75,12 @@ interrupts = <3 2>; }; }; + +&mmc0 { + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; +}; + +&usb1 { + status = "okay"; +}; -- cgit v0.10.2 From 6d545a632fbbce79492ba535b15ea0142aa3e80d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 7 Jul 2015 14:13:38 +0300 Subject: perf tools: Fix lockup using 32-bit compat vdso The __machine__findnew_compat() function is called only from __machine__findnew_vdso_compat() which is 
called only from machine__findnew_vdso() which already holds machine->dsos.lock, so remove locking from __machine__findnew_compat(). This manifests itself tracing 32-bit programs with a 64-bit perf. Signed-off-by: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Link: http://lkml.kernel.org/r/1436267618-20521-1-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index 4b89118..44d440d 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -236,18 +236,16 @@ static struct dso *__machine__findnew_compat(struct machine *machine, const char *file_name; struct dso *dso; - pthread_rwlock_wrlock(&machine->dsos.lock); dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true); if (dso) - goto out_unlock; + goto out; file_name = vdso__get_compat_file(vdso_file); if (!file_name) - goto out_unlock; + goto out; dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); -out_unlock: - pthread_rwlock_unlock(&machine->dsos.lock); +out: return dso; } -- cgit v0.10.2 From ef37566cf8d607844cee5a01597d828c8fd1a501 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 7 Jul 2015 17:15:39 +0100 Subject: arm64: Keep the ARM64 Kconfig selects sorted Move EDAC_SUPPORT to the right place. Signed-off-by: Catalin Marinas diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0f6edb1..318175f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -23,9 +23,9 @@ config ARM64 select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select EDAC_SUPPORT select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS + select EDAC_SUPPORT select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST if SMP -- cgit v0.10.2 From 8eb231261fdd20768db23863d00ef277de4b0543 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 14:11:00 +0200 Subject: tick/broadcast: Prevent hrtimer recursion The hrtimer based broadcast vehicle can cause a hrtimer recursion which went unnoticed until we changed the hrtimer expiry code to keep track of the currently running timer. local_timer_interrupt() local_handler() hrtimer_interrupt() expire_hrtimers() broadcast_hrtimer() send_ipis() local_handler() hrtimer_interrupt() .... Solution is simple: Prevent the local handler call from the broadcast code when the broadcast 'device' is hrtimer based. [ Split out from a larger combo patch ] Tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index d39f32c..a762040 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -265,8 +265,22 @@ static bool tick_do_broadcast(struct cpumask *mask) * Check, if the current cpu is in the mask */ if (cpumask_test_cpu(cpu, mask)) { + struct clock_event_device *bc = tick_broadcast_device.evtdev; + cpumask_clear_cpu(cpu, mask); - local = true; + /* + * We only run the local handler, if the broadcast + * device is not hrtimer based. Otherwise we run into + * a hrtimer recursion. 
+ * + * local timer_interrupt() + * local_handler() + * expire_hrtimers() + * bc_handler() + * local_handler() + * expire_hrtimers() + */ + local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER); } if (!cpumask_empty(mask)) { -- cgit v0.10.2 From e0454311903d3fd0f12a86c9e65d7b271c2bb05d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 14:07:27 +0200 Subject: tick/broadcast: Sanity check the shutdown of the local clock_event The broadcast code shuts down the local clock event unconditionally even if no broadcast device is installed or if the broadcast device is hrtimer based. Add proper sanity checks. [ Split out from a larger combo patch ] Reported-and-tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index a762040..9877d0b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { struct clock_event_device *bc = tick_broadcast_device.evtdev; unsigned long flags; - int ret; + int ret = 0; raw_spin_lock_irqsave(&tick_broadcast_lock, flags); @@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) * If we kept the cpu in the broadcast mask, * tell the caller to leave the per cpu device * in shutdown state. The periodic interrupt - * is delivered by the broadcast device. + * is delivered by the broadcast device, if + * the broadcast device exists and is not + * hrtimer based. */ - ret = cpumask_test_cpu(cpu, tick_broadcast_mask); + if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER)) + ret = cpumask_test_cpu(cpu, tick_broadcast_mask); break; default: - /* Nothing to do */ - ret = 0; break; } } @@ -373,8 +374,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) case TICK_BROADCAST_ON: cpumask_set_cpu(cpu, tick_broadcast_on); if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { - if (tick_broadcast_device.mode == - TICKDEV_MODE_PERIODIC) + /* + * Only shutdown the cpu local device, if: + * + * - the broadcast device exists + * - the broadcast device is not a hrtimer based one + * - the broadcast device is in periodic mode to + * avoid a hickup during switch to oneshot mode + */ + if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) && + tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) clockevents_shutdown(dev); } break; -- cgit v0.10.2 From f32dd117051185da6e923b35491a44d7debeeea5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 16:29:38 +0200 Subject: tick/broadcast: Make idle check independent from mode and config Currently the broadcast busy check, which prevents the idle code from going into deep idle, works only in one shot mode. If NOHZ and HIGHRES are off (config or command line) there is no sanity check at all, so under certain conditions cpus are allowed to go into deep idle, where the local timer stops, and are not woken up again because there is no broadcast timer installed or a hrtimer based broadcast device is not evaluated. Move tick_broadcast_oneshot_control() into the common code and provide proper subfunctions for the various config combinations. The common check in tick_broadcast_oneshot_control() is for the C3STOP misfeature flag of the local clock event device. If its not set, idle can proceed. 
If set, further checks are necessary. Provide checks for the trivial cases: - If broadcast is disabled in the config, then return busy - If oneshot mode (NOHZ/HIGHRES) is disabled in the config, return busy if the broadcast device is hrtimer based. - If oneshot mode is enabled in the config, call the original tick_broadcast_oneshot_control() function. That function needs extra checks which will be implemented in separate patches. [ Split out from a larger combo patch ] Reported-and-tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/include/linux/tick.h b/include/linux/tick.h index 3741ba1..6916dcb 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -67,11 +67,7 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ -#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); -#else -static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } -#endif static inline void tick_broadcast_enable(void) { diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 9877d0b..ef77b16 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -685,18 +685,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); } -/** - * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode - * @state: The target state (enter/exit) - * - * The system enters/leaves a state, where affected devices might stop - * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. - * - * Called with interrupts disabled, so clockevents_lock is not - * required here because the local clock event device cannot go away - * under us. - */ -int tick_broadcast_oneshot_control(enum tick_broadcast_state state) +int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) { struct clock_event_device *bc, *dev; struct tick_device *td; @@ -717,9 +706,6 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state) td = this_cpu_ptr(&tick_cpu_device); dev = td->evtdev; - if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) - return 0; - raw_spin_lock(&tick_broadcast_lock); bc = tick_broadcast_device.evtdev; cpu = smp_processor_id(); @@ -961,6 +947,16 @@ bool tick_broadcast_oneshot_available(void) return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; } +#else +int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) +{ + struct clock_event_device *bc = tick_broadcast_device.evtdev; + + if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER)) + return -EBUSY; + + return 0; +} #endif void __init tick_broadcast_init(void) diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 76446cb..55e13ef 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -343,6 +343,27 @@ out_bc: tick_install_broadcast_device(newdev); } +/** + * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode + * @state: The target state (enter/exit) + * + * The system enters/leaves a state, where affected devices might stop + * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ * + * Called with interrupts disabled, so clockevents_lock is not + * required here because the local clock event device cannot go away + * under us. + */ +int tick_broadcast_oneshot_control(enum tick_broadcast_state state) +{ + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); + + if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP)) + return 0; + + return __tick_broadcast_oneshot_control(state); +} + #ifdef CONFIG_HOTPLUG_CPU /* * Transfer the do_timer job away from a dying cpu. diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 42fdf49..a4a8d4e 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu); static inline void tick_cancel_sched_timer(int cpu) { } #endif +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state); +#else +static inline int +__tick_broadcast_oneshot_control(enum tick_broadcast_state state) +{ + return -EBUSY; +} +#endif + #endif -- cgit v0.10.2 From b78f3f3c898c824bf56ab55cfa59fc72be49c349 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 16:34:32 +0200 Subject: tick/broadcast: Prevent deep idle if no broadcast device available Add a check for a installed broadcast device to the oneshot control function and return busy if not. [ Split out from a larger combo patch ] Reported-and-tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index ef77b16..fad3f78 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -693,6 +693,13 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) ktime_t now; /* + * If there is no broadcast device, tell the caller not to go + * into deep idle. + */ + if (!tick_broadcast_device.evtdev) + return -EBUSY; + + /* * Periodic mode does not care about the enter/exit of power * states */ -- cgit v0.10.2 From e3ac79e087ffe8a1f953ed44a74acf7616cb0b25 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 16:38:11 +0200 Subject: tick/broadcast: Move the check for periodic mode inside state handling We need to check more than the periodic mode for proper operation in all runtime combinations. To avoid code duplication move the check into the enter state handling. No functional change. [ Split out from a larger combo patch ] Reported-and-tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. 
Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index fad3f78..83aa92e 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -688,7 +688,6 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) { struct clock_event_device *bc, *dev; - struct tick_device *td; int cpu, ret = 0; ktime_t now; @@ -699,25 +698,20 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) if (!tick_broadcast_device.evtdev) return -EBUSY; - /* - * Periodic mode does not care about the enter/exit of power - * states - */ - if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) - return 0; - - /* - * We are called with preemtion disabled from the depth of the - * idle code, so we can't be moved away. - */ - td = this_cpu_ptr(&tick_cpu_device); - dev = td->evtdev; + dev = this_cpu_ptr(&tick_cpu_device)->evtdev; raw_spin_lock(&tick_broadcast_lock); bc = tick_broadcast_device.evtdev; cpu = smp_processor_id(); if (state == TICK_BROADCAST_ENTER) { + /* + * If the broadcast device is in periodic mode, we + * return. + */ + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) + goto out; + if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); broadcast_shutdown_local(bc, dev); -- cgit v0.10.2 From d33257264b0267a8fd20f6717abbb484c9e21130 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 17:45:22 +0200 Subject: tick/broadcast: Return busy if periodic mode and hrtimer broadcast If the system is in periodic mode and the broadcast device is hrtimer based, return busy as we have no proper handling for this. [ Split out from a larger combo patch ] Tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 83aa92e..da7b40f 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -709,8 +709,12 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) * If the broadcast device is in periodic mode, we * return. */ - if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { + /* If it is a hrtimer based broadcast, return busy */ + if (bc->features & CLOCK_EVT_FEAT_HRTIMER) + ret = -EBUSY; goto out; + } if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); -- cgit v0.10.2 From 0cc5281aa592d0020868f6ccaed359b4ad7b2684 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 16:45:15 +0200 Subject: tick/broadcast: Return busy when IPI is pending Tell the idle code not to go deep if the broadcast IPI is about to arrive. [ Split out from a larger combo patch ] Tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. 
Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index da7b40f..70b47bc 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -725,11 +725,15 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) * if the cpu local event is earlier than the * broadcast event. If the current CPU is in * the force mask, then we are going to be - * woken by the IPI right away. + * woken by the IPI right away; we return + * busy, so the CPU does not try to go deep + * idle. */ - if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) && - dev->next_event.tv64 < bc->next_event.tv64) + if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { + ret = -EBUSY; + } else if (dev->next_event.tv64 < bc->next_event.tv64) { tick_broadcast_set_event(bc, cpu, dev->next_event); + } } /* * If the current CPU owns the hrtimer broadcast -- cgit v0.10.2 From d5113e13a550bc9c2b53cc9944b8a06453c4a0a1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 16:43:04 +0200 Subject: tick/broadcast: Check for hrtimer broadcast active early If the current cpu is the one which has the hrtimer based broadcast queued then we better return busy immediately instead of going through loops and hoops to figure that out. [ Split out from a larger combo patch ] Tested-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: Suzuki Poulose Cc: Lorenzo Pieralisi Cc: Catalin Marinas Cc: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: Ingo Molnar Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1507070929360.3916@nanos diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 70b47bc..c8d731a 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -706,6 +706,17 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) if (state == TICK_BROADCAST_ENTER) { /* + * If the current CPU owns the hrtimer broadcast + * mechanism, it cannot go deep idle and we do not add + * the CPU to the broadcast mask. We don't have to go + * through the EXIT path as the local timer is not + * shutdown. + */ + ret = broadcast_needs_cpu(bc, cpu); + if (ret) + goto out; + + /* * If the broadcast device is in periodic mode, we * return. */ @@ -718,7 +729,10 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); + + /* Conditionally shut down the local timer. */ broadcast_shutdown_local(bc, dev); + /* * We only reprogram the broadcast timer if we * did not mark ourself in the force mask and @@ -733,18 +747,20 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) ret = -EBUSY; } else if (dev->next_event.tv64 < bc->next_event.tv64) { tick_broadcast_set_event(bc, cpu, dev->next_event); + /* + * In case of hrtimer broadcasts the + * programming might have moved the + * timer to this cpu. If yes, remove + * us from the broadcast mask and + * return busy. + */ + ret = broadcast_needs_cpu(bc, cpu); + if (ret) { + cpumask_clear_cpu(cpu, + tick_broadcast_oneshot_mask); + } } } - /* - * If the current CPU owns the hrtimer broadcast - * mechanism, it cannot go deep idle and we remove the - * CPU from the broadcast mask. We don't have to go - * through the EXIT path as the local timer is not - * shutdown. 
- */ - ret = broadcast_needs_cpu(bc, cpu); - if (ret) - cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); } else { if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); -- cgit v0.10.2 From c4288334818c81c946acb23d2319881f58c3d497 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 5 Jul 2015 20:53:17 +0000 Subject: tick/broadcast: Handle spurious interrupts gracefully Andriy reported that on a virtual machine the warning about negative expiry time in the clock events programming code triggered: hpet: hpet0 irq 40 for MSI hpet: hpet1 irq 41 for MSI Switching to clocksource hpet WARNING: at kernel/time/clockevents.c:239 [] clockevents_program_event+0xdb/0xf0 [] tick_handle_periodic_broadcast+0x41/0x50 [] timer_interrupt+0x15/0x20 When the second hpet is installed as a per cpu timer the broadcast event is no longer required and is stopped, which sets the next_evt of the broadcast device to KTIME_MAX. If after that a spurious interrupt happens on the broadcast device, then the current code blindly handles it and tries to reprogram the broadcast device afterwards, which adds the period to next_evt. KTIME_MAX + period results in a negative expiry value, causing the WARN_ON in the clockevents code to trigger. Add a proper check for the state of the broadcast device into the interrupt handler and return if the interrupt is spurious. [ Folded in pointer fix from Sudeep ] Reported-by: Andriy Gapon Signed-off-by: Thomas Gleixner Cc: Sudeep Holla Cc: Peter Zijlstra Cc: Preeti U Murthy Link: http://lkml.kernel.org/r/20150705205221.802094647@linutronix.de diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index c8d731a..ee3cf94 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -316,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) bool bc_local; raw_spin_lock(&tick_broadcast_lock); + + /* Handle spurious interrupts gracefully */ + if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) { + raw_spin_unlock(&tick_broadcast_lock); + return; + } + bc_local = tick_do_periodic_broadcast(); if (clockevent_state_oneshot(dev)) { -- cgit v0.10.2 From 539c4b88146cc320aee18b08ebb43ab57d523dcc Mon Sep 17 00:00:00 2001 From: duson Date: Tue, 7 Jul 2015 10:25:39 -0700 Subject: Input: elan_i2c - change the hover event from MT to ST The firmware only reports the hover condition while the very first contact is approaching the surface; the hover is not reported for the subsequent contacts. Therefore we should not be using ABS_MT_DISTANCE to report hover but rather its single-touch counterpart ABS_DISTANCE.
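A minimal sketch of the reporting scheme described above (illustrative only, not the actual elan_i2c code; report_hover() is a hypothetical helper): hover is a device-wide condition, so it is reported once per frame on the single-touch ABS_DISTANCE axis rather than per MT slot.

#include <linux/input.h>

/* Setup, done once at probe time:
 *	input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 */

/* Hypothetical helper: report hover once per frame as a single-touch axis. */
static void report_hover(struct input_dev *input, bool hover)
{
	input_report_abs(input, ABS_DISTANCE, hover ? 1 : 0);
	input_sync(input);
}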
Signed-off-by: Duson Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 62641f2..5b5f403 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -771,7 +771,7 @@ static const struct attribute_group *elan_sysfs_groups[] = { */ static void elan_report_contact(struct elan_tp_data *data, int contact_num, bool contact_valid, - bool hover_event, u8 *finger_data) + u8 *finger_data) { struct input_dev *input = data->input; unsigned int pos_x, pos_y; @@ -815,9 +815,7 @@ static void elan_report_contact(struct elan_tp_data *data, input_mt_report_slot_state(input, MT_TOOL_FINGER, true); input_report_abs(input, ABS_MT_POSITION_X, pos_x); input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y); - input_report_abs(input, ABS_MT_DISTANCE, hover_event); - input_report_abs(input, ABS_MT_PRESSURE, - hover_event ? 0 : scaled_pressure); + input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure); input_report_abs(input, ABS_TOOL_WIDTH, mk_x); input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); @@ -839,14 +837,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) hover_event = hover_info & 0x40; for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & (1U << (3 + i)); - elan_report_contact(data, i, contact_valid, hover_event, - finger_data); + elan_report_contact(data, i, contact_valid, finger_data); if (contact_valid) finger_data += ETP_FINGER_DATA_LEN; } input_report_key(input, BTN_LEFT, tp_info & 0x01); + input_report_abs(input, ABS_DISTANCE, hover_event != 0); input_mt_report_pointer_emulation(input, true); input_sync(input); } @@ -922,6 +920,7 @@ static int elan_setup_input_device(struct elan_tp_data *data) input_abs_set_res(input, ABS_Y, data->y_res); input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0); + input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0); /* And MT parameters */ input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0); @@ -934,7 +933,6 @@ static int elan_setup_input_device(struct elan_tp_data *data) ETP_FINGER_WIDTH * max_width, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, ETP_FINGER_WIDTH * min_width, 0, 0); - input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0); data->input = input; -- cgit v0.10.2 From 71eeedcf51544831ae356a773814401143ed32d4 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 7 Jul 2015 20:49:03 +0200 Subject: MIPS: Lemote 2F: Fix build caused by recent mass rename. CC arch/mips/loongson64/lemote-2f/clock.o /home/ralf/src/linux/linux-mips/arch/mips/loongson64/lemote-2f/clock.c:18:40: fatal error: asm/mach-loongson/loongson.h: No such file or directory #include ^ compilation terminated. Signed-off-by: Ralf Baechle diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c index 462e34d..4c5b94a 100644 --- a/arch/mips/loongson64/lemote-2f/clock.c +++ b/arch/mips/loongson64/lemote-2f/clock.c @@ -15,7 +15,7 @@ #include #include -#include +#include static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); -- cgit v0.10.2 From 0bb383a2d8f51e32ecc156f3f84067420ffe6d20 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 7 Jul 2015 20:56:04 +0200 Subject: MIPS, CPUFREQ: Fix spelling of Institute. 
Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index 37c08a2..c9f7e23 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & - * Insititute of Computing Technology + * Institute of Computing Technology * Author: Xiang Gao, gaoxiang@ict.ac.cn * Huacai Chen, chenhc@lemote.com * Xiaofu Meng, Shuangshuang Zhang diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c index cc0e4fd..4e116d2 100644 --- a/arch/mips/loongson64/common/bonito-irq.c +++ b/arch/mips/loongson64/common/bonito-irq.c @@ -3,7 +3,7 @@ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org) * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c index 72fed00..01fbed1 100644 --- a/arch/mips/loongson64/common/cmdline.c +++ b/arch/mips/loongson64/common/cmdline.c @@ -6,7 +6,7 @@ * Copyright 2003 ICT CAS * Author: Michael Guo * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c index 12c75db..8750370 100644 --- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c +++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c @@ -1,7 +1,7 @@ /* * CS5536 General timer functions * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Yanhua, yanh@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index 22f04ca..f6c44dd 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -6,7 +6,7 @@ * Copyright 2003 ICT CAS * Author: Michael Guo * - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * Copyright (C) 2009 Lemote Inc. diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c index 687003b..d36d969 100644 --- a/arch/mips/loongson64/common/irq.c +++ b/arch/mips/loongson64/common/irq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c index d477dd6..2dc5122 100644 --- a/arch/mips/loongson64/common/setup.c +++ b/arch/mips/loongson64/common/setup.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. 
& Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c index ef5ec8f..892963f8 100644 --- a/arch/mips/loongson64/fuloong-2e/irq.c +++ b/arch/mips/loongson64/fuloong-2e/irq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c index 4c5b94a..a78fb65 100644 --- a/arch/mips/loongson64/lemote-2f/clock.c +++ b/arch/mips/loongson64/lemote-2f/clock.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology * Author: Yanhua, yanh@lemote.com * * This file is subject to the terms and conditions of the GNU General Public diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 12d14ed..6f9e010 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & - * Insititute of Computing Technology + * Institute of Computing Technology * Author: Xiang Gao, gaoxiang@ict.ac.cn * Huacai Chen, chenhc@lemote.com * Xiaofu Meng, Shuangshuang Zhang diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index fc897ba..e362860 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -3,7 +3,7 @@ * * The 2E revision of loongson processor not support this feature. * - * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology * Author: Yanhua, yanh@lemote.com * * This file is subject to the terms and conditions of the GNU General Public -- cgit v0.10.2 From 37b64a42067a04a22468c4e52c12af00d72e462b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jul 2015 21:56:34 +0200 Subject: tick/broadcast: Unbreak CONFIG_GENERIC_CLOCKEVENTS=n build Making tick_broadcast_oneshot_control() independent from CONFIG_GENERIC_CLOCKEVENTS_BROADCAST broke the build for CONFIG_GENERIC_CLOCKEVENTS=n because the function is not defined there. Provide a proper stub inline. 
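A generic sketch of the stub-inline pattern referred to above (illustrative only; CONFIG_FOO and foo_control() are placeholder names, not the actual tick interfaces): when the feature is configured out, a static inline stub keeps callers compiling and optimizes away to nothing.

/* Placeholder example of the pattern, not kernel code. */
#ifdef CONFIG_FOO
extern int foo_control(int state);
#else
static inline int foo_control(int state)
{
	return 0;	/* nothing to do when the feature is not built in */
}
#endif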
Fixes: f32dd1170511 'tick/broadcast: Make idle check independent from mode and config' Reported-by: kbuild test robot Signed-off-by: Thomas Gleixner diff --git a/include/linux/tick.h b/include/linux/tick.h index 6916dcb..edbfc9a 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -67,7 +67,14 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ +#ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); +#else +static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) +{ + return 0; +} +#endif static inline void tick_broadcast_enable(void) { -- cgit v0.10.2 From d4acc1651588835b802a2049f4f3a2adcc1af750 Mon Sep 17 00:00:00 2001 From: Graham Whaley Date: Tue, 7 Jul 2015 19:13:37 +0100 Subject: Documentation: drm: Fix tabulation in KMS properties table Commit 712a0dd91c4a ("Documentation/drm: Update rotation property") left an extra 'rowspan' for the row omap, which pushed the following qxl rows' columns out to column 8 and broke the tabulation. Remove the errant rowspan. Signed-off-by: Graham Whaley Signed-off-by: Daniel Vetter diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index c0312cb..2fb9a54 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev) TBD - omap + omap Generic “zorder” RANGE -- cgit v0.10.2 From 4f273959b850569253299987eee611927f048de7 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Wed, 8 Jul 2015 00:22:03 +0300 Subject: mei: nfc: fix deadlock on shutdown/suspend path In mei_nfc_host_exit, mei_cl_remove_device cannot be called under the device mutex: the device removal flow invokes the device driver's remove handler, which in turn calls mei_cl_disable_device, and that naturally acquires the device mutex. Also remove mei_cl_bus_remove_devices, which has the same issue but is never executed, as currently the only device on the mei client bus is NFC and a new device cannot easily be added until the bus revamp is completed. This fixes a regression caused by commit be9b720a0ccb ("mei_phy: move all nfc logic from mei driver to nfc"). Prior to that change the nfc driver's remove handler called a no-op disable function, while the actual nfc device was disabled directly from the mei driver. 
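For illustration only: a minimal user-space analogue of the deadlock described above, with hypothetical names and a pthread error-checking mutex standing in for the device mutex, so the second acquisition is reported instead of hanging.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t device_lock;

static void driver_remove(void)
{
        /* Mirrors the driver remove path (mei_cl_disable_device): it needs the lock. */
        int err = pthread_mutex_lock(&device_lock);

        if (err)
                printf("driver_remove: lock failed: %s (the deadlock)\n", strerror(err));
        else
                pthread_mutex_unlock(&device_lock);
}

static void remove_device(void)
{
        /* Mirrors mei_cl_remove_device(): it ends up in the driver's remove handler. */
        driver_remove();
}

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&device_lock, &attr);

        pthread_mutex_lock(&device_lock);       /* caller already holds the lock ...    */
        remove_device();                        /* ... and the remove path wants it too */
        pthread_mutex_unlock(&device_lock);
        return 0;
}

With an ordinary mutex the second lock would simply hang, which is the shutdown/suspend hang the patch removes by doing the device removal before taking the lock.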
Reported-by: Linus Torvalds Acked-by: Greg Kroah-Hartman Cc: Samuel Ortiz Signed-off-by: Tomas Winkler Signed-off-by: Linus Torvalds diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 357b6ae..458aa5a 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl) schedule_work(&device->event_work); } -void mei_cl_bus_remove_devices(struct mei_device *dev) -{ - struct mei_cl *cl, *next; - - mutex_lock(&dev->device_lock); - list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { - if (cl->device) - mei_cl_remove_device(cl->device); - - list_del(&cl->device_link); - mei_cl_unlink(cl); - kfree(cl); - } - mutex_unlock(&dev->device_lock); -} - int __init mei_cl_bus_init(void) { return bus_register(&mei_cl_bus_type); diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 94514b2..00c3865 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev) mei_nfc_host_exit(dev); - mei_cl_bus_remove_devices(dev); - mutex_lock(&dev->device_lock); mei_wd_stop(dev); diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index b983c4e..290ef30 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c @@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev) cldev->priv_data = NULL; - mutex_lock(&dev->device_lock); /* Need to remove the device here * since mei_nfc_free will unlink the clients */ mei_cl_remove_device(cldev); + + mutex_lock(&dev->device_lock); mei_nfc_free(ndev); mutex_unlock(&dev->device_lock); } -- cgit v0.10.2 From 56551da9255f20ffd3a9711728a1a3ad4b7100af Mon Sep 17 00:00:00 2001 From: Pankaj Dev Date: Tue, 7 Jul 2015 09:40:50 +0200 Subject: drivers: clk: st: Incorrect register offset used for lock_status Incorrect register offset used for stih407 clockgenC Signed-off-by: Pankaj Dev Signed-off-by: Gabriel Fernandez Fixes: 51306d56ba81 ("clk: st: STiH407: Support for clockgenC0") Signed-off-by: Stephen Boyd diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 99c98c1..d9eb2e1 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = { CLKGEN_FIELD(0x30c, 0xf, 20), CLKGEN_FIELD(0x310, 0xf, 20) }, .lockstatus_present = true, - .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24), + .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24), .powerup_polarity = 1, .standby_polarity = 1, .pll_ops = &st_quadfs_pll_c32_ops, -- cgit v0.10.2 From 7928eb0370d1133d0d8cd2f5ddfca19c309079d5 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 8 Jul 2015 04:49:10 +0200 Subject: MIPS: O32: Do not require 32 bytes from the stack to be readable. Commit 46e12c07b3b9603c60fc1d421ff18618241cb081 (MIPS: O32 / 32-bit: Always copy 4 stack arguments.) changed the O32 syscall handler to always load four arguments from the userspace stack, even for syscalls that require fewer or no arguments to be copied. This removed a large table from kernel space and the need to maintain it. It appeared to be ok that the chosen implementation requires 16 bytes of readable stack space above the user stack pointer. It turned out that a few threading implementations munmap the user stack before the thread exits, resulting in errors due to the unreadable stack. We now treat any failed load as if the loaded value was zero and let the actual syscall deal with the situation. 
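For illustration only: a small user-space sketch of the new fallback, with a hypothetical fetch_word() standing in for the exception-table protected user loads in the handler below.

#include <stdbool.h>
#include <stdio.h>

/* Pretend the read faults when the page backing the stack has been unmapped. */
static bool fetch_word(const unsigned long *addr, unsigned long *val)
{
        if (!addr)                      /* simulated fault */
                return false;
        *val = *addr;
        return true;
}

/* Old behaviour: fail the whole syscall with -EFAULT.
 * New behaviour: substitute 0 and let the syscall decide if the argument mattered. */
static unsigned long fetch_or_zero(const unsigned long *addr)
{
        unsigned long val;

        return fetch_word(addr, &val) ? val : 0;
}

int main(void)
{
        unsigned long arg5 = 42;

        printf("readable stack: arg = %lu\n", fetch_or_zero(&arg5));
        printf("unmapped stack: arg = %lu\n", fetch_or_zero(NULL));
        return 0;
}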
Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 6e8de80..4cc1350 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp) .set noreorder .set nomacro -1: user_lw(t5, 16(t0)) # argument #5 from usp -4: user_lw(t6, 20(t0)) # argument #6 from usp -3: user_lw(t7, 24(t0)) # argument #7 from usp -2: user_lw(t8, 28(t0)) # argument #8 from usp +load_a4: user_lw(t5, 16(t0)) # argument #5 from usp +load_a5: user_lw(t6, 20(t0)) # argument #6 from usp +load_a6: user_lw(t7, 24(t0)) # argument #7 from usp +load_a7: user_lw(t8, 28(t0)) # argument #8 from usp +loads_done: sw t5, 16(sp) # argument #5 to ksp sw t6, 20(sp) # argument #6 to ksp @@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp) .set pop .section __ex_table,"a" - PTR 1b,bad_stack - PTR 2b,bad_stack - PTR 3b,bad_stack - PTR 4b,bad_stack + PTR load_a4, bad_stack_a4 + PTR load_a5, bad_stack_a5 + PTR load_a6, bad_stack_a6 + PTR load_a7, bad_stack_a7 .previous lw t0, TI_FLAGS($28) # syscall tracing enabled? @@ -153,8 +154,8 @@ syscall_trace_entry: /* ------------------------------------------------------------------------ */ /* - * The stackpointer for a call with more than 4 arguments is bad. - * We probably should handle this case a bit more drastic. + * Our open-coded access area sanity test for the stack pointer + * failed. We probably should handle this case a bit more drastic. */ bad_stack: li v0, EFAULT @@ -163,6 +164,22 @@ bad_stack: sw t0, PT_R7(sp) j o32_syscall_exit +bad_stack_a4: + li t5, 0 + b load_a5 + +bad_stack_a5: + li t6, 0 + b load_a6 + +bad_stack_a6: + li t7, 0 + b load_a7 + +bad_stack_a7: + li t8, 0 + b loads_done + /* * The system call does not exist in this kernel */ diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d07b210..66d618b 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp) daddu t1, t0, 32 bltz t1, bad_stack -1: lw a4, 16(t0) # argument #5 from usp -2: lw a5, 20(t0) # argument #6 from usp -3: lw a6, 24(t0) # argument #7 from usp -4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls) +load_a4: lw a4, 16(t0) # argument #5 from usp +load_a5: lw a5, 20(t0) # argument #6 from usp +load_a6: lw a6, 24(t0) # argument #7 from usp +load_a7: lw a7, 28(t0) # argument #8 from usp +loads_done: .section __ex_table,"a" - PTR 1b, bad_stack - PTR 2b, bad_stack - PTR 3b, bad_stack - PTR 4b, bad_stack + PTR load_a4, bad_stack_a4 + PTR load_a5, bad_stack_a5 + PTR load_a6, bad_stack_a6 + PTR load_a7, bad_stack_a7 .previous li t1, _TIF_WORK_SYSCALL_ENTRY @@ -167,6 +168,22 @@ bad_stack: sd t0, PT_R7(sp) j o32_syscall_exit +bad_stack_a4: + li a4, 0 + b load_a5 + +bad_stack_a5: + li a5, 0 + b load_a6 + +bad_stack_a6: + li a6, 0 + b load_a7 + +bad_stack_a7: + li a7, 0 + b loads_done + not_o32_scall: /* * This is not an o32 compatibility syscall, pass it on -- cgit v0.10.2 From 5caaf5346892d1e7f0b8b7223062644f8538483f Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Tue, 7 Jul 2015 15:45:46 +1000 Subject: cxl: Fail mmap if requested mapping is larger than assigned problem state area This patch makes the mmap call fail outright if the requested region is larger than the problem state area assigned to the context so the error is reported immediately rather than waiting for an attempt to access an address out of bounds. 
Although we never expect users to map more than the assigned problem state area and are not aware of anyone doing this (other than for testing), this does have the potential to break users if someone has used a larger range regardless. I'm submitting it for consideration, but if this change is not considered acceptable the previous patch is sufficient to prevent access out of bounds without breaking anyone. Signed-off-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 2a4c80a..6236d4a 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = { */ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) { + u64 start = vma->vm_pgoff << PAGE_SHIFT; u64 len = vma->vm_end - vma->vm_start; - len = min(len, ctx->psn_size); + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { + if (start + len > ctx->afu->adapter->ps_size) + return -EINVAL; + } else { + if (start + len > ctx->psn_size) + return -EINVAL; + } if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { /* make sure there is a valid per process space for this AFU */ -- cgit v0.10.2 From 10a5894f2dedd8a26b3132497445b314c0d952c4 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Tue, 7 Jul 2015 15:45:45 +1000 Subject: cxl: Fix off by one error allowing subsequent mmap page to be accessed It was discovered that if a process mmaped their problem state area they were able to access one page more than expected, potentially allowing them to access the problem state area of an unrelated process. This was due to a simple off by one error in the mmap fault handler introduced in 0712dc7e73e59d79bcead5d5520acf4e9e917e87 ("cxl: Fix issues when unmapping contexts"), which is fixed in this patch. Cc: stable@vger.kernel.org Fixes: 0712dc7e73e5 ("cxl: Fix issues when unmapping contexts") Signed-off-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 6236d4a..1287148 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { area = ctx->afu->psn_phys; - if (offset > ctx->afu->adapter->ps_size) + if (offset >= ctx->afu->adapter->ps_size) return VM_FAULT_SIGBUS; } else { area = ctx->psn_phys; - if (offset > ctx->psn_size) + if (offset >= ctx->psn_size) return VM_FAULT_SIGBUS; } -- cgit v0.10.2 From 442053e57a4fc58b719b6ceab60f29ef9cf4404c Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 7 Jul 2015 12:21:10 -0400 Subject: powerpc/perf/24x7: Fix lockdep warning The sysfs attributes for the 24x7 counters are dynamically allocated. Initialize the attributes using sysfs_attr_init() to fix following warning which occurs when CONFIG_DEBUG_LOCK_VMALLOC=y. [ 0.346249] audit: initializing netlink subsys (disabled) [ 0.346284] audit: type=2000 audit(1436295254.340:1): initialized [ 0.346489] BUG: key c0000000efe90198 not in .data! 
[ 0.346491] DEBUG_LOCKS_WARN_ON(1) [ 0.346502] ------------[ cut here ]------------ [ 0.346504] WARNING: at ../kernel/locking/lockdep.c:3002 [ 0.346506] Modules linked in: Reported-by: Gustavo Luiz Duarte Signed-off-by: Sukadev Bhattiprolu Tested-by: Gustavo Luiz Duarte Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index ec2eb20..df95629 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str) if (!attr) return NULL; + sysfs_attr_init(&attr->attr.attr); + attr->var = str; attr->attr.attr.name = name; attr->attr.attr.mode = 0444; -- cgit v0.10.2 From 9958084a5275ca2e8f55c5b18729307f2f0cb53b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 7 Jul 2015 19:16:23 +1000 Subject: powerpc: Update MAINTAINERS to point at shared tree Now that we have a shared powerpc tree on kernel.org, point folks at that as the primary place to look for powerpc stuff. Signed-off-by: Michael Ellerman diff --git a/MAINTAINERS b/MAINTAINERS index 8133cef..b29f8eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6161,7 +6161,7 @@ M: Michael Ellerman W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git S: Supported F: Documentation/powerpc/ F: arch/powerpc/ -- cgit v0.10.2 From 030f4e968741d65aea9cd5f7814d1164967801ef Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 7 Jul 2015 17:30:25 +0800 Subject: crypto: nx - Fix reentrancy bugs This patch fixes a host of reentrancy bugs in the nx driver. The following algorithms are affected: * CCM * GCM * CTR * XCBC * SHA256 * SHA512 The crypto API allows a single transform to be used by multiple threads simultaneously. For example, IPsec will use a single tfm to process packets for a given SA. As packets may arrive on multiple CPUs that tfm must be reentrant. The nx driver does try to deal with this by using a spin lock. Unfortunately only the basic AES/CBC/ECB algorithms do this in the correct way. The symptom of these bugs may range from the generation of incorrect output to memory corruption. 
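For illustration only: a user-space sketch of the general shape of the fix, with hypothetical structures. Per-operation scratch data such as the IV lives in a per-request object instead of the shared transform context, so two threads using the same context cannot overwrite each other's state.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct shared_ctx {                     /* analogous to the per-tfm nx context     */
        unsigned char nonce[4];         /* immutable key material: safe to share   */
};

struct request {                        /* analogous to nx_gcm_rctx / nx_ccm_rctx  */
        unsigned char iv[16];           /* mutable per-request scratch, not shared */
};

static struct shared_ctx ctx = { .nonce = { 1, 2, 3, 4 } };

static void *do_crypt(void *arg)
{
        unsigned long id = (unsigned long)arg;
        struct request req;             /* each caller builds its own IV */

        memcpy(req.iv, ctx.nonce, sizeof(ctx.nonce));
        memset(req.iv + 4, (int)id + 1, sizeof(req.iv) - 4);
        printf("thread %lu uses iv[4] = %d\n", id, (int)req.iv[4]);
        return NULL;
}

int main(void)
{
        pthread_t t[2];
        unsigned long i;

        for (i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, do_crypt, (void *)i);
        for (i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Had iv[] lived in shared_ctx instead, the two threads would race on it, which is the kind of corruption described above.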
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 67f8081..e4311ce 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -494,8 +494,9 @@ out: static int ccm4309_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct blkcipher_desc desc; - u8 *iv = nx_ctx->priv.ccm.iv; + u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); @@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req) static int ccm4309_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct blkcipher_desc desc; - u8 *iv = nx_ctx->priv.ccm.iv; + u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 2617cd4..dd7e9f3 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm, if (key_len < CTR_RFC3686_NONCE_SIZE) return -EINVAL; - memcpy(nx_ctx->priv.ctr.iv, + memcpy(nx_ctx->priv.ctr.nonce, in_key + key_len - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); @@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, unsigned int nbytes) { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); - u8 *iv = nx_ctx->priv.ctr.iv; + u8 iv[16]; + memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE); memcpy(iv + CTR_RFC3686_NONCE_SIZE, desc->info, CTR_RFC3686_IV_SIZE); iv[12] = iv[13] = iv[14] = 0; iv[15] = 1; - desc->info = nx_ctx->priv.ctr.iv; + desc->info = iv; return ctr_aes_nx_crypt(desc, dst, src, nbytes); } diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 08ac6d4..92c993f 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -317,6 +317,7 @@ out: static int gcm_aes_nx_crypt(struct aead_request *req, int enc) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; @@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) spin_lock_irqsave(&nx_ctx->lock, irq_flags); - desc.info = nx_ctx->priv.gcm.iv; + desc.info = rctx->iv; /* initialize the counter */ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; @@ -424,8 +425,8 @@ out: static int gcm_aes_nx_encrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; memcpy(iv, req->iv, 12); @@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req) static int gcm_aes_nx_decrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; memcpy(iv, req->iv, 12); @@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req) static int gcm4106_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv 
= rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); @@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req) static int gcm4106_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 8c2faff..c2f7d4b 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; switch (key_len) { case AES_KEYSIZE_128: @@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, return -EINVAL; } - memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); + memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len); return 0; } @@ -148,32 +149,29 @@ out: return rc; } -static int nx_xcbc_init(struct shash_desc *desc) +static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm) { - struct xcbc_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; - struct nx_sg *out_sg; - int len; + int err; - nx_ctx_init(nx_ctx, HCOP_FC_AES); + err = nx_crypto_ctx_aes_xcbc_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_AES); NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; - memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); - memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); - - len = AES_BLOCK_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, nx_ctx->ap->sglen); + return 0; +} - if (len != AES_BLOCK_SIZE) - return -EINVAL; +static int nx_xcbc_init(struct shash_desc *desc) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + memset(sctx, 0, sizeof *sctx); return 0; } @@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u32 to_process = 0, leftover, total; unsigned int max_sg_len; unsigned long irq_flags; @@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = AES_BLOCK_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, nx_ctx->ap->sglen); + + if (data_len != AES_BLOCK_SIZE) { + rc = -EINVAL; + goto out; + } + + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + do { to_process = total - to_process; to_process = to_process & ~(AES_BLOCK_SIZE - 1); @@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc, (u8 *) sctx->buffer, &data_len, max_sg_len); - if (data_len != sctx->count) - return -EINVAL; + if (data_len != sctx->count) { + rc = -EINVAL; + goto out; + } } data_len = to_process - sctx->count; @@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc, &data_len, max_sg_len); - if (data_len != to_process - sctx->count) - 
return -EINVAL; + if (data_len != to_process - sctx->count) { + rc = -EINVAL; + goto out; + } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); @@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, &len, nx_ctx->ap->sglen); - if (len != sctx->count) - return -EINVAL; + if (len != sctx->count) { + rc = -EINVAL; + goto out; + } len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, nx_ctx->ap->sglen); - if (len != AES_BLOCK_SIZE) - return -EINVAL; + if (len != AES_BLOCK_SIZE) { + rc = -EINVAL; + goto out; + } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); @@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = { .cra_blocksize = AES_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_aes_xcbc_init, + .cra_init = nx_crypto_ctx_aes_xcbc_init2, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 4e91bdb..08f8d5c 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -29,34 +29,28 @@ #include "nx.h" -static int nx_sha256_init(struct shash_desc *desc) +static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) { - struct sha256_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); - struct nx_sg *out_sg; - int len; - u32 max_sg_len; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + int err; - nx_ctx_init(nx_ctx, HCOP_FC_SHA); + err = nx_crypto_ctx_sha_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); + return 0; +} - len = SHA256_DIGEST_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, max_sg_len); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); +static int nx_sha256_init(struct shash_desc *desc) { + struct sha256_state *sctx = shash_desc_ctx(desc); - if (len != SHA256_DIGEST_SIZE) - return -EINVAL; + memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); @@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; @@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = SHA256_DIGEST_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &data_len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (data_len != SHA256_DIGEST_SIZE) { + rc = -EINVAL; + goto out; + } + do { /* * to_process: the SHA256_BLOCK_SIZE data chunk to process in @@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), 
- .cra_init = nx_crypto_ctx_sha_init, + .cra_init = nx_crypto_ctx_sha256_init, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index e6a58d2..aff0fe5 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -28,34 +28,29 @@ #include "nx.h" -static int nx_sha512_init(struct shash_desc *desc) +static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) { - struct sha512_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); - struct nx_sg *out_sg; - int len; - u32 max_sg_len; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + int err; - nx_ctx_init(nx_ctx, HCOP_FC_SHA); + err = nx_crypto_ctx_sha_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); + return 0; +} - len = SHA512_DIGEST_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, max_sg_len); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); +static int nx_sha512_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); - if (len != SHA512_DIGEST_SIZE) - return -EINVAL; + memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); @@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; @@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = SHA512_DIGEST_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &data_len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (data_len != SHA512_DIGEST_SIZE) { + rc = -EINVAL; + goto out; + } + do { /* * to_process: the SHA512_BLOCK_SIZE data chunk to process in @@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = { .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha_init, + .cra_init = nx_crypto_ctx_sha512_init, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index f6198f2..4369713 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) /* entry points from the crypto tfm initializers */ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) { + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct nx_ccm_rctx)); return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, NX_MODE_AES_CCM); } int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) { + crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx)); return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_GCM); } diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index de3ea87..cdff03a 100644 --- 
a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -2,6 +2,8 @@ #ifndef __NX_H__ #define __NX_H__ +#include + #define NX_NAME "nx-crypto" #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" #define NX_VERSION "1.0" @@ -91,8 +93,11 @@ struct nx_crypto_driver { #define NX_GCM4106_NONCE_LEN (4) #define NX_GCM_CTR_OFFSET (12) -struct nx_gcm_priv { +struct nx_gcm_rctx { u8 iv[16]; +}; + +struct nx_gcm_priv { u8 iauth_tag[16]; u8 nonce[NX_GCM4106_NONCE_LEN]; }; @@ -100,8 +105,11 @@ struct nx_gcm_priv { #define NX_CCM_AES_KEY_LEN (16) #define NX_CCM4309_AES_KEY_LEN (19) #define NX_CCM4309_NONCE_LEN (3) -struct nx_ccm_priv { +struct nx_ccm_rctx { u8 iv[16]; +}; + +struct nx_ccm_priv { u8 b0[16]; u8 iauth_tag[16]; u8 oauth_tag[16]; @@ -113,7 +121,7 @@ struct nx_xcbc_priv { }; struct nx_ctr_priv { - u8 iv[16]; + u8 nonce[CTR_RFC3686_NONCE_SIZE]; }; struct nx_crypto_ctx { -- cgit v0.10.2 From d1f15e06b2af57228d11e33e06a172ff58f5bc1c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 8 Jul 2015 09:22:10 +0200 Subject: ALSA: hda - Fix a wrong busy check in alt PCM open Currently, the alt PCM open callback returns -EBUSY when an independent HP is turned off, supposing that it conflicts with the main PCM. However, obviously, this check is wrong when the independent HP itself isn't enabled but the alt PCM was explicitly created via alc_dac_nid by a codec driver. Reported-and-tested-by: Kailang Yang Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index ac0db16..b077bb6 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -5175,7 +5175,7 @@ static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo, int err = 0; mutex_lock(&spec->pcm_mutex); - if (!spec->indep_hp_enabled) + if (spec->indep_hp && !spec->indep_hp_enabled) err = -EBUSY; else spec->active_streams |= 1 << STREAM_INDEP_HP; -- cgit v0.10.2 From afde13126085390aa55843ffefd5ced696e2c273 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 27 Jun 2015 17:51:13 -0300 Subject: ARM: dts: imx27: Adjust the GPT compatible string include/soc/imx/timer.h describes well the different versions of the GPT block among the imx family: enum imx_gpt_type { GPT_TYPE_IMX1, /* i.MX1 */ GPT_TYPE_IMX21, /* i.MX21/27 */ GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */ GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */ }; So the proper compatible string for the MX27 case should be "fsl,imx21-gpt". 
Signed-off-by: Fabio Estevam Reviewed-by: Philippe Reynes Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index bc215e4..b69be5c 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -108,7 +108,7 @@ }; gpt1: timer@10003000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10003000 0x1000>; interrupts = <26>; clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>, @@ -117,7 +117,7 @@ }; gpt2: timer@10004000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10004000 0x1000>; interrupts = <25>; clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>, @@ -126,7 +126,7 @@ }; gpt3: timer@10005000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10005000 0x1000>; interrupts = <24>; clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>, @@ -376,7 +376,7 @@ }; gpt4: timer@10019000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10019000 0x1000>; interrupts = <4>; clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>, @@ -385,7 +385,7 @@ }; gpt5: timer@1001a000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x1001a000 0x1000>; interrupts = <3>; clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>, @@ -436,7 +436,7 @@ }; gpt6: timer@1001f000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x1001f000 0x1000>; interrupts = <2>; clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>, -- cgit v0.10.2 From e8e94ed6285428ab780cd7b0df4622f71eceb39e Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Tue, 2 Jun 2015 22:03:28 +0000 Subject: ARM: dts: mx23: fix iio-hwmon support In order to get iio-hwmon support, the lradc must be declared as an iio provider. So fix this issue by adding the #io-channel-cells property. Signed-off-by: Stefan Wahren Fixes: bd798f9c7b30 ("ARM: dts: mxs: Add iio-hwmon to mx23 soc") Reviewed-by: Marek Vasut Reviewed-by: Alexandre Belloni Cc: Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index c892d58..b995333 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -468,6 +468,7 @@ interrupts = <36 37 38 39 40 41 42 43 44>; status = "disabled"; clocks = <&clks 26>; + #io-channel-cells = <1>; }; spdif@80054000 { -- cgit v0.10.2 From 91c269a0d3eadd63f7112411ee812fbc170dc488 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 6 Jul 2015 20:55:35 +0200 Subject: MAINTAINER: add bridge netfilter So scripts/get_maintainer.pl shows the Netfilter mailing lists. Reported-by: Julien Grall Signed-off-by: Pablo Neira Ayuso diff --git a/MAINTAINERS b/MAINTAINERS index 993d4cf..8183b465 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6996,6 +6996,7 @@ F: include/uapi/linux/netfilter/ F: net/*/netfilter.c F: net/*/netfilter/ F: net/netfilter/ +F: net/bridge/br_netfilter*.c NETLABEL M: Paul Moore -- cgit v0.10.2 From 86e8971800381c3a8d8d9327f83b1f97ccb04a4f Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 7 Jul 2015 15:55:21 +0100 Subject: netfilter: bridge: Use __in6_dev_get rather than in6_dev_get in br_validate_ipv6 The commit efb6de9b4ba0092b2c55f6a52d16294a8a698edd "netfilter: bridge: forward IPv6 fragmented packets" introduced a new function br_validate_ipv6 which take a reference on the inet6 device. Although, the reference is not released at the end. 
This will result to the impossibility to destroy any netdevice using ipv6 and bridge. It's possible to directly retrieve the inet6 device without taking a reference as all netfilter hooks are protected by rcu_read_lock via nf_hook_slow. Spotted while trying to destroy a Xen guest on the upstream Linux: "unregister_netdevice: waiting for vif1.0 to become free. Usage count = 1" Signed-off-by: Julien Grall Cc: Bernhard Thaler Cc: Pablo Neira Ayuso Cc: fw@strlen.de Cc: ian.campbell@citrix.com Cc: wei.liu2@citrix.com Cc: Bob Liu Acked-by: Stephen Hemminger Signed-off-by: Pablo Neira Ayuso diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index 6d12d26..13b7d1e 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb) { const struct ipv6hdr *hdr; struct net_device *dev = skb->dev; - struct inet6_dev *idev = in6_dev_get(skb->dev); + struct inet6_dev *idev = __in6_dev_get(skb->dev); u32 pkt_len; u8 ip6h_len = sizeof(struct ipv6hdr); -- cgit v0.10.2 From 69711ca19b06d1b33d8f21213b540b5d1c638dbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Hinderer?= Date: Wed, 8 Jul 2015 00:02:01 +0200 Subject: x86/kconfig: Fix typo in the CONFIG_CMDLINE_BOOL help text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sébastien Hinderer Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Samuel Thibault Cc: Thomas Gleixner Signed-off-by: Ingo Molnar diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0b2929f..3dbb7e7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2020,7 +2020,7 @@ config CMDLINE_BOOL To compile command line arguments into the kernel, set this option to 'Y', then fill in the - the boot arguments in CONFIG_CMDLINE. + boot arguments in CONFIG_CMDLINE. Systems with fully functional boot loaders (i.e. non-embedded) should leave this option set to 'N'. -- cgit v0.10.2 From 2c3d99845eb7b1e9f3d2863099bc5a25c8461d3f Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 6 Jul 2015 15:15:01 +0100 Subject: drm/i915: Restore all GGTT VMAs on resume When rotated and partial views were added no one spotted the resume path which assumes only one GGTT VMA per object and hence is now skipping rebind of alternative views. Signed-off-by: Tvrtko Ursulin Cc: Daniel Vetter Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9daa288..dcc6a88 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + struct i915_vma *vma; + bool flush; i915_check_and_clear_faults(dev); @@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) dev_priv->gtt.base.total, true); + /* Cache flush objects bound into GGTT and rebind them. 
*/ + vm = &dev_priv->gtt.base; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, - &dev_priv->gtt.base); - if (!vma) - continue; + flush = false; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (vma->vm != vm) + continue; - i915_gem_clflush_object(obj, obj->pin_display); - WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); - } + WARN_ON(i915_vma_bind(vma, obj->cache_level, + PIN_UPDATE)); + flush = true; + } + + if (flush) + i915_gem_clflush_object(obj, obj->pin_display); + } if (INTEL_INFO(dev)->gen >= 8) { if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) -- cgit v0.10.2 From a899418167264c7bac574b1a0f1b2c26c5b0995a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 5 Jul 2015 17:12:30 +0000 Subject: hotplug: Prevent alloc/free of irq descriptors during cpu up/down When a cpu goes up, some architectures (e.g. x86) have to walk the irq space to set up the vector space for the cpu. While this needs extra protection at the architecture level, we can avoid a few race conditions by preventing the concurrent allocation/free of irq descriptors and the associated data. When a cpu goes down, it moves the interrupts which are targeted to this cpu away by reassigning the affinities. While this happens interrupts can be allocated and freed, which opens a can of race conditions in the code which reassigns the affinities because interrupt descriptors might be freed underneath. Example: CPU1 CPU2 cpu_up/down irq_desc = irq_to_desc(irq); remove_from_radix_tree(desc); raw_spin_lock(&desc->lock); free(desc); We could protect the irq descriptors with RCU, but that would require a full tree change of all accesses to interrupt descriptors. But fortunately this kind of race condition is rather limited to a few things like cpu hotplug. The normal setup/teardown is very well serialized. So the simpler and obvious solution is: Prevent allocation and freeing of interrupt descriptors across cpu hotplug. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: xiao jin Cc: Joerg Roedel Cc: Borislav Petkov Cc: Yanmin Zhang Link: http://lkml.kernel.org/r/20150705171102.063519515@linutronix.de diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 624a668..fcea4e4 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -87,7 +87,12 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; -#ifndef CONFIG_SPARSE_IRQ +#ifdef CONFIG_SPARSE_IRQ +extern void irq_lock_sparse(void); +extern void irq_unlock_sparse(void); +#else +static inline void irq_lock_sparse(void) { } +static inline void irq_unlock_sparse(void) { } extern struct irq_desc irq_desc[NR_IRQS]; #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index 9c9c9fa..6a37454 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "smpboot.h" @@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) smpboot_park_threads(cpu); /* - * So now all preempt/rcu users must observe !cpu_active(). + * Prevent irq alloc/free while the dying cpu reorganizes the + * interrupt affinities. */ + irq_lock_sparse(); + /* + * So now all preempt/rcu users must observe !cpu_active(). + */ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. 
*/ cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); + irq_unlock_sparse(); goto out_release; } BUG_ON(cpu_online(cpu)); @@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */ per_cpu(cpu_dead_idle, cpu) = false; + /* Interrupts are moved away from the dying cpu, reenable alloc/free */ + irq_unlock_sparse(); + hotplug_cpu__broadcast_tick_pull(cpu); /* This actually kills the CPU. */ __cpu_die(cpu); @@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen) goto out_notify; } + /* + * Some architectures have to walk the irq descriptors to + * setup the vector space for the cpu which comes online. + * Prevent irq alloc/free across the bringup. + */ + irq_lock_sparse(); + /* Arch-specific enabling code. */ ret = __cpu_up(cpu, idle); + + irq_unlock_sparse(); + if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4834ee8..61008b8 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc); #ifdef CONFIG_SPARSE_IRQ static inline void irq_mark_irq(unsigned int irq) { } -extern void irq_lock_sparse(void); -extern void irq_unlock_sparse(void); #else extern void irq_mark_irq(unsigned int irq); -static inline void irq_lock_sparse(void) { } -static inline void irq_unlock_sparse(void) { } #endif extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); -- cgit v0.10.2 From 5fc472a628edb8ad83016a063329e8b589a04060 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 8 Jul 2015 13:17:31 +0200 Subject: perf stat: Fix shadow declaration of close Vinson reported shadow declaration of close introduced by the following commit: 106a94a0f8c2 perf stat: Introduce read_counters function Using close_counters name instead. Reported-by: Vinson Lee Signed-off-by: Jiri Olsa Cc: Jiri Olsa Cc: Peter Zijlstra Fixes: 106a94a0f8c2 ("perf stat: Introduce read_counters function") Link: http://lkml.kernel.org/r/20150708111731.GA3512@krava.redhat.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 37e301a..d99d850 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -343,7 +343,7 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static void read_counters(bool close) +static void read_counters(bool close_counters) { struct perf_evsel *counter; @@ -354,7 +354,7 @@ static void read_counters(bool close) if (process_counter(counter)) pr_warning("failed to process counter %s\n", counter->name); - if (close) { + if (close_counters) { perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), thread_map__nr(evsel_list->threads)); } -- cgit v0.10.2 From 63fef06ada94172d7b4295fc205441bafd8f8fb3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 7 Jul 2015 11:15:46 +0200 Subject: drm/i915: Check crtc->active in intel_crtc_disable_planes This was lost in commit ce22dba92de22e951dee2ff89937a39754d2dd91 Author: Maarten Lankhorst Date: Tue Apr 21 17:12:56 2015 +0300 drm/i915: Move toggling planes out of crtc enable/disable. and we still need that crtc->active check since the overall modeset flow doesn't yet take dpms state into account properly. Fixes WARNING backtraces on at least bdw/hsw due to the ips disabling code being upset about being run on a switched-off pipe. 
We don't need a corresponding change on the enable side since with the old setCrtc semantics we always force-enable the pipe after a modeset. And the dpms function intel_crtc_control already checks for ->active. Reported-by: Linus Torvalds Cc: Linus Torvalds Cc: Maarten Lankhorst Cc: Ander Conselvan de Oliveira Signed-off-by: Daniel Vetter Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8aa8038..647b140 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) struct intel_plane *intel_plane; int pipe = intel_crtc->pipe; + if (!intel_crtc->active) + return; + intel_crtc_wait_for_pending_flips(crtc); intel_pre_disable_primary(crtc); -- cgit v0.10.2 From dec4f799d0a4c9edae20512fa60b0a36f3299ca2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 7 Jul 2015 11:15:47 +0200 Subject: drm/i915: Use crtc_state->active in primary check_plane func Since commit 8c7b5ccb729870e606321b3703e2c2e698c49a95 Author: Ander Conselvan de Oliveira Date: Tue Apr 21 17:13:19 2015 +0300 drm/i915: Use atomic helpers for computing changed flags we compute the plane state for a modeset before actually committing any changes, which means crtc->active won't be correct yet. Looking at future work in the modeset conversion targetting 4.3 the only places where crtc_state->active isn't accurate is when disabling other CRTCs than the one the modeset is for (when stealing connectors). Which isn't the case here. And that's also confirmed by an audit, we do unconditionally update crtc_state->active for the current pipe. We also don't need to update any other plane check functions since we only ever add the primary state to the modeset update right now. 
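For illustration only: a tiny sketch of the principle with hypothetical structures. During the check phase of an atomic update, decisions must be made against the state being committed, not against the object's current, not-yet-updated flags.

#include <stdbool.h>
#include <stdio.h>

struct crtc { bool active; };           /* current, possibly stale object state     */
struct crtc_state { bool active; };     /* new state being checked for this update  */

static int check_plane(const struct crtc *crtc, const struct crtc_state *new_state)
{
        (void)crtc;                     /* crtc->active is stale during the check phase */
        if (!new_state->active)
                return 0;               /* pipe will be off: nothing more to validate */
        return 1;                       /* ... further checks against new_state ...   */
}

int main(void)
{
        struct crtc crtc = { .active = false };            /* modeset not committed yet */
        struct crtc_state new_state = { .active = true };

        printf("needs further checks: %d\n", check_plane(&crtc, &new_state));
        return 0;
}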
Cc: Ander Conselvan de Oliveira Cc: Maarten Lankhorst Cc: Jani Nikula Signed-off-by: Daniel Vetter Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 647b140..ba93219 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13276,7 +13276,7 @@ intel_check_primary_plane(struct drm_plane *plane, if (ret) return ret; - if (intel_crtc->active) { + if (crtc_state->base.active) { struct intel_plane_state *old_state = to_intel_plane_state(plane->state); -- cgit v0.10.2 From de24264026463dd3f32f2c2e01c390f9d0651e37 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 19 Jun 2015 16:38:39 +0200 Subject: ARM: dts: imx53-qsb: fix TVE entry Correct HSYNC/VSYNC pins and add ddc-i2c-bus property Signed-off-by: Philipp Zabel Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi index 181ae5e..ab4ba39 100644 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi @@ -295,9 +295,10 @@ &tve { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_vga_sync>; + ddc-i2c-bus = <&i2c2>; fsl,tve-mode = "vga"; - fsl,hsync-pin = <4>; - fsl,vsync-pin = <6>; + fsl,hsync-pin = <7>; /* IPU DI1 PIN7 via EIM_OE */ + fsl,vsync-pin = <8>; /* IPU DI1 PIN8 via EIM_RW */ status = "okay"; }; -- cgit v0.10.2 From d438462c20a300139c2e5e65b96cadaa21b58d9a Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 30 Jun 2015 14:48:24 +0200 Subject: ARM: imx6: gpc: always enable PU domain if CONFIG_PM is not set If CONFIG_PM is not set the PU power domain needs to be enabled always, otherwise there are two failure scenarios which will hang the system if one of the devices in the PU domain is accessed. 1. New DTs (4.1+) drop the "always-on" property from the PU regulator, so if it isn't properly enabled by the GPC code it will be disabled at the end of boot. 2. If the bootloader already disabled the PU domain the GPC explicitly needs to enable it again, even if the kernel doesn't do any power management. This is a bit hypothetical, as it requires to boot a mainline kernel on a downstream bootloader, as no mainline bootloader disables the PM domains. Cc: # 4.1 Signed-off-by: Lucas Stach Acked-by: Philipp Zabel Signed-off-by: Shawn Guo diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 80bad29..8c4467f 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void) } } -#ifdef CONFIG_PM_GENERIC_DOMAINS - static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd) { int iso, iso2sw; @@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = { static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) { struct clk *clk; - bool is_off; int i; imx6q_pu_domain.reg = pu_reg; @@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) } imx6q_pu_domain.num_clks = i; - is_off = IS_ENABLED(CONFIG_PM); - if (is_off) { - _imx6q_pm_pu_power_off(&imx6q_pu_domain.base); - } else { - /* - * Enable power if compiled without CONFIG_PM in case the - * bootloader disabled it. - */ - imx6q_pm_pu_power_on(&imx6q_pu_domain.base); - } + /* Enable power always in case bootloader disabled it. 
*/ + imx6q_pm_pu_power_on(&imx6q_pu_domain.base); + + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) + return 0; - pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off); + pm_genpd_init(&imx6q_pu_domain.base, NULL, false); return of_genpd_add_provider_onecell(dev->of_node, &imx_gpc_onecell_data); @@ -437,13 +429,6 @@ clk_err: return -EINVAL; } -#else -static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg) -{ - return 0; -} -#endif /* CONFIG_PM_GENERIC_DOMAINS */ - static int imx_gpc_probe(struct platform_device *pdev) { struct regulator *pu_reg; -- cgit v0.10.2 From 6224beb12e190ff11f3c7d4bf50cb2922878f600 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 7 Jul 2015 15:05:03 -0400 Subject: tracing: Have branch tracer use recursive field of task struct Fengguang Wu's tests triggered a bug in the branch tracer's start up test when CONFIG_DEBUG_PREEMPT set. This was because that config adds some debug logic in the per cpu field, which calls back into the branch tracer. The branch tracer has its own recursive checks, but uses a per cpu variable to implement it. If retrieving the per cpu variable calls back into the branch tracer, you can see how things will break. Instead of using a per cpu variable, use the trace_recursion field of the current task struct. Simply set a bit when entering the branch tracing and clear it when leaving. If the bit is set on entry, just don't do the tracing. There's also the case with lockdep, as the local_irq_save() called before the recursion can also trigger code that can call back into the function. Changing that to a raw_local_irq_save() will protect that as well. This prevents the recursion and the inevitable crash that follows. Link: http://lkml.kernel.org/r/20150630141803.GA28071@wfg-t540p.sh.intel.com Cc: stable@vger.kernel.org # 3.10+ Reported-by: Fengguang Wu Tested-by: Fengguang Wu Signed-off-by: Steven Rostedt diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f060716..74bde81 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -444,6 +444,7 @@ enum { TRACE_CONTROL_BIT, + TRACE_BRANCH_BIT, /* * Abuse of the trace_recursion. * As we need a way to maintain state if we are tracing the function diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index a87b43f..e2e12ad 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) struct trace_branch *entry; struct ring_buffer *buffer; unsigned long flags; - int cpu, pc; + int pc; const char *p; + if (current->trace_recursion & TRACE_BRANCH_BIT) + return; + /* * I would love to save just the ftrace_likely_data pointer, but * this code can also be used by modules. 
Ugly things can happen @@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) if (unlikely(!tr)) return; - local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = per_cpu_ptr(tr->trace_buffer.data, cpu); - if (atomic_inc_return(&data->disabled) != 1) + raw_local_irq_save(flags); + current->trace_recursion |= TRACE_BRANCH_BIT; + data = this_cpu_ptr(tr->trace_buffer.data); + if (atomic_read(&data->disabled)) goto out; pc = preempt_count(); @@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) __buffer_unlock_commit(buffer, event); out: - atomic_dec(&data->disabled); - local_irq_restore(flags); + current->trace_recursion &= ~TRACE_BRANCH_BIT; + raw_local_irq_restore(flags); } static inline -- cgit v0.10.2 From 45820c294fe1b1a9df495d57f40585ef2d069a39 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 8 Jul 2015 09:33:38 -0700 Subject: Fix broken audit tests for exec arg len The "fix" in commit 0b08c5e5944 ("audit: Fix check of return value of strnlen_user()") didn't fix anything, it broke things. As reported by Steven Rostedt: "Yes, strnlen_user() returns 0 on fault, but if you look at what len is set to, than you would notice that on fault len would be -1" because we just subtracted one from the return value. So testing against 0 doesn't test for a fault condition, it tests against a perfectly valid empty string. Also fix up the usual braindamage wrt using WARN_ON() inside a conditional - make it part of the conditional and remove the explicit unlikely() (which is already part of the WARN_ON*() logic, exactly so that you don't have to write unreadable code. Reported-and-tested-by: Steven Rostedt Cc: Jan Kara Cc: Paul Moore Signed-off-by: Linus Torvalds diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 09c6564..e85bdfd 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context, * for strings that are too long, we should not have created * any. */ - if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) { - WARN_ON(1); + if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) { send_sig(SIGKILL, current, 0); return -1; } -- cgit v0.10.2 From 07f18f0bb8d8d65badd8b4988b40d329fc0cc6dc Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Fri, 3 Jul 2015 06:03:06 +0200 Subject: drm/radeon: Handle irqs only based on irq ring, not irq status regs. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Trying to resolve issues with missed vblanks and impossible values inside delivered kms pageflip completion events showed that radeon's irq handling sometimes doesn't handle valid irqs, but silently skips them. This was observed for vblank interrupts. Although those irqs have corresponding events queued in the gpu's irq ring at time of interrupt, and therefore the corresponding handling code gets triggered by these events, the handling code sometimes silently skipped processing the irq. The reason for those skips is that the handling code double-checks for each irq event if the corresponding irq status bits in the irq status registers are set. Sometimes those bits are not set at time of check for valid irqs, maybe due to some hardware race on some setups? 
The problem only seems to happen on some machine + card combos sometimes, e.g., never happened during my testing of different PC cards of the DCE-2/3/4 generation a year ago, but happens consistently now on two different Apple Mac cards (RV730, DCE-3, Apple iMac and Evergreen JUNIPER, DCE-4 in a Apple MacPro). It also doesn't happen at each interrupt but only occassionally every couple of hundred or thousand vblank interrupts. This results in XOrg warning messages like "[ 7084.472] (WW) RADEON(0): radeon_dri2_flip_event_handler: Pageflip completion event has impossible msc 420120 < target_msc 420121" as well as skipped frames and problems for applications that use kms pageflip events or vblank events, e.g., users of DRI2 and DRI3/Present, Waylands Weston compositor, etc. See also https://bugs.freedesktop.org/show_bug.cgi?id=85203 After some talking to Alex and Michel, we decided to fix this by turning the double-check for asserted irq status bits into a warning. Whenever a irq event is queued in the IH ring, always execute the corresponding interrupt handler. Still check the irq status bits, but only to log a DRM_DEBUG message on a mismatch. This fixed the problems reliably on both previously failing cards, RV-730 dual-head tested on both crtcs (pipes D1 and D2) and a triple-output Juniper HD-5770 card tested on all three available crtcs (D1/D2/D3). The r600 and evergreen irq handling is therefore tested, but the cik an si handling is only compile tested due to lack of hw. Reviewed-by: Christian König Signed-off-by: Mario Kleiner CC: Michel Dänzer CC: Alex Deucher CC: # v3.16+ Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 4ecf5ca..248953d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7964,23 +7964,27 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_vblank(rdev, 0); - rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -7990,23 +7994,27 @@ restart_ih: case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } 
- if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_vblank(rdev, 1); - rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D2 vline */ - if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -8016,23 +8024,27 @@ restart_ih: case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_vblank(rdev, 2); - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D3 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[2])) + radeon_crtc_handle_vblank(rdev, 2); + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + break; case 1: /* D3 vline */ - if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; - DRM_DEBUG("IH: D3 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -8042,23 +8054,27 @@ restart_ih: case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_vblank(rdev, 3); - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D4 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[3])) + radeon_crtc_handle_vblank(rdev, 3); + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 
vblank\n"); + break; case 1: /* D4 vline */ - if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; - DRM_DEBUG("IH: D4 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -8068,23 +8084,27 @@ restart_ih: case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[4]) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_vblank(rdev, 4); - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D5 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[4])) + radeon_crtc_handle_vblank(rdev, 4); + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + break; case 1: /* D5 vline */ - if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; - DRM_DEBUG("IH: D5 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -8094,23 +8114,27 @@ restart_ih: case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[5]) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_vblank(rdev, 5); - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D6 vblank\n"); + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[5])) + radeon_crtc_handle_vblank(rdev, 5); + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + break; case 1: /* D6 vline */ - if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; - DRM_DEBUG("IH: D6 vline\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -8130,88 +8154,112 @@ restart_ih: case 42: /* HPD hotplug */ switch (src_data) { 
case 0: - if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD1\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + break; case 1: - if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD2\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + break; case 2: - if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD3\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + break; case 3: - if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD4\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + break; case 4: - if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD5\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + break; case 5: - if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD6\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + break; case 6: - if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 1\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + break; case 7: - if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 2\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + break; case 8: - if 
(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 3\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + break; case 9: - if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 4\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + break; case 10: - if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 5\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + break; case 11: - if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 6\n"); - } + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 3a6d483..0acde19 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4924,7 +4924,7 @@ restart_ih: return IRQ_NONE; rptr = rdev->ih.rptr; - DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr); /* Order reading of wptr vs. 
reading of IH ring data */ rmb(); @@ -4942,23 +4942,27 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_vblank(rdev, 0); - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4968,23 +4972,27 @@ restart_ih: case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_vblank(rdev, 1); - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D2 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4994,23 +5002,27 @@ restart_ih: case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_vblank(rdev, 2); - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; - 
DRM_DEBUG("IH: D3 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[2])) + radeon_crtc_handle_vblank(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + break; case 1: /* D3 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; - DRM_DEBUG("IH: D3 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5020,23 +5032,27 @@ restart_ih: case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_vblank(rdev, 3); - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D4 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[3])) + radeon_crtc_handle_vblank(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + break; case 1: /* D4 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; - DRM_DEBUG("IH: D4 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5046,23 +5062,27 @@ restart_ih: case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[4]) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_vblank(rdev, 4); - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D5 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[4])) + radeon_crtc_handle_vblank(rdev, 4); + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= 
~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + break; case 1: /* D5 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; - DRM_DEBUG("IH: D5 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5072,23 +5092,27 @@ restart_ih: case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[5]) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_vblank(rdev, 5); - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D6 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[5])) + radeon_crtc_handle_vblank(rdev, 5); + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + break; case 1: /* D6 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; - DRM_DEBUG("IH: D6 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5108,88 +5132,100 @@ restart_ih: case 42: /* HPD hotplug */ switch (src_data) { case 0: - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); break; case 1: - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); break; case 2: - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + 
queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); break; case 3: - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); break; case 4: - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD5\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); break; case 5: - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD6\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); break; case 6: - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); break; case 7: - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); break; case 8: - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); break; case 9: - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); break; case 10: - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 5\n"); - } + if 
(!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); break; case 11: - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 6\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5199,46 +5235,52 @@ restart_ih: case 44: /* hdmi */ switch (src_data) { case 0: - if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI0\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI0\n"); break; case 1: - if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI1\n"); break; case 2: - if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI2\n"); break; case 3: - if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI3\n"); break; case 4: - if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI4\n"); break; case 5: - if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI5\n"); - } + if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + 
rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI5\n"); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 35dafd7..4ea5b10 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4086,23 +4086,27 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_vblank(rdev, 0); - rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4112,23 +4116,27 @@ restart_ih: case 5: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_vblank(rdev, 1); - rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4148,46 +4156,53 @@ restart_ih: case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: - if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD1\n"); - } + 
if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT)) + DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); break; case 1: - if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD2\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT)) + DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); break; case 4: - if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD3\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT)) + DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); break; case 5: - if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD4\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT)) + DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); break; case 10: - if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD5\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT)) + DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); break; case 12: - if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { - rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD6\n"); - } + if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT)) + DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4197,18 +4212,22 @@ restart_ih: case 21: /* hdmi */ switch (src_data) { case 4: - if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI0\n"); - } + if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI0\n"); + break; case 5: - if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { - rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; - queue_hdmi = true; - DRM_DEBUG("IH: HDMI1\n"); - } + if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI1\n"); + break; default: 
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 26388b5..07037e3 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6466,23 +6466,27 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_vblank(rdev, 0); - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6492,23 +6496,27 @@ restart_ih: case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_vblank(rdev, 1); - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D2 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6518,23 +6526,27 @@ restart_ih: case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if 
(atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_vblank(rdev, 2); - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D3 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[2])) + radeon_crtc_handle_vblank(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + break; case 1: /* D3 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; - DRM_DEBUG("IH: D3 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6544,23 +6556,27 @@ restart_ih: case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_vblank(rdev, 3); - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D4 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[3])) + radeon_crtc_handle_vblank(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + break; case 1: /* D4 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; - DRM_DEBUG("IH: D4 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6570,23 +6586,27 @@ restart_ih: case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[4]) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_vblank(rdev, 4); - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D5 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[4])) + 
radeon_crtc_handle_vblank(rdev, 4); + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + break; case 1: /* D5 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; - DRM_DEBUG("IH: D5 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6596,23 +6616,27 @@ restart_ih: case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[5]) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_vblank(rdev, 5); - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D6 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[5])) + radeon_crtc_handle_vblank(rdev, 5); + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + break; case 1: /* D6 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; - DRM_DEBUG("IH: D6 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -6632,88 +6656,112 @@ restart_ih: case 42: /* HPD hotplug */ switch (src_data) { case 0: - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + break; case 1: - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + break; case 2: - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + 
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + break; case 3: - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + break; case 4: - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD5\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + break; case 5: - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD6\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + break; case 6: - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + break; case 7: - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + break; case 8: - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + break; case 9: - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + break; case 10: - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= 
~DC_HPD5_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 5\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + break; case 11: - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; - queue_dp = true; - DRM_DEBUG("IH: HPD_RX 6\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -- cgit v0.10.2 From bd833144a23dead304744dc748f5d72d7e92d315 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Fri, 3 Jul 2015 06:03:07 +0200 Subject: drm/amdgpu: Handle irqs only based on irq ring, not irq status regs. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a translation of the patch ... "drm/radeon: Handle irqs only based on irq ring, not irq status regs." ... for the vblank irq handling, to fix the same problem described in that patch on the new driver. Only compile tested due to lack of suitable hw. Reviewed-by: Christian König Signed-off-by: Mario Kleiner CC: Michel Dänzer CC: Alex Deucher Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 5cde635..6e77964 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, switch (entry->src_data) { case 0: /* vblank */ - if (disp_int & interrupt_status_offsets[crtc].vblank) { + if (disp_int & interrupt_status_offsets[crtc].vblank) dce_v10_0_crtc_vblank_int_ack(adev, crtc); - if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); - } - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + break; case 1: /* vline */ - if (disp_int & interrupt_status_offsets[crtc].vline) { + if (disp_int & interrupt_status_offsets[crtc].vline) dce_v10_0_crtc_vline_int_ack(adev, crtc); - DRM_DEBUG("IH: D%d vline\n", crtc + 1); - } + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 95efd98..7f7abb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, switch (entry->src_data) { case 0: /* vblank */ - if (disp_int & interrupt_status_offsets[crtc].vblank) { + if (disp_int & interrupt_status_offsets[crtc].vblank) dce_v11_0_crtc_vblank_int_ack(adev, crtc); - if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); - } - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if 
(amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + break; case 1: /* vline */ - if (disp_int & interrupt_status_offsets[crtc].vline) { + if (disp_int & interrupt_status_offsets[crtc].vline) dce_v11_0_crtc_vline_int_ack(adev, crtc); - DRM_DEBUG("IH: D%d vline\n", crtc + 1); - } + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index aaca8d6..08387df 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, switch (entry->src_data) { case 0: /* vblank */ - if (disp_int & interrupt_status_offsets[crtc].vblank) { + if (disp_int & interrupt_status_offsets[crtc].vblank) WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); - if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); - } - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + break; case 1: /* vline */ - if (disp_int & interrupt_status_offsets[crtc].vline) { + if (disp_int & interrupt_status_offsets[crtc].vline) WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); - DRM_DEBUG("IH: D%d vline\n", crtc + 1); - } + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); -- cgit v0.10.2 From 828202a382a06eda74202d4888d01a1078c68922 Mon Sep 17 00:00:00 2001 From: Grigori Goronzy Date: Fri, 3 Jul 2015 01:54:10 +0200 Subject: drm/radeon: use RCU query for GEM_BUSY syscall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to call the (expensive) radeon_bo_wait, checking the fences via RCU is much faster. The reservation done by radeon_bo_wait does not save us from any race conditions. 
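As a rough sketch of the pattern this change adopts (illustration only, not the actual hunk below; the helper name radeon_bo_test_busy_rcu is invented here, while reservation_object_test_signaled_rcu(), ACCESS_ONCE() and radeon_mem_type_to_domain() are the interfaces the diff itself relies on):

#include <linux/reservation.h>
#include "radeon.h"

/*
 * Hypothetical helper: answer "is this BO still busy?" without blocking,
 * by testing the attached fences under RCU instead of reserving the
 * object and waiting on it.
 */
static int radeon_bo_test_busy_rcu(struct radeon_bo *robj, uint32_t *domain)
{
	/* true: all fences (shared and exclusive) must be signaled */
	bool idle = reservation_object_test_signaled_rcu(robj->tbo.resv, true);

	/* the placement is sampled unlocked; a stale value is acceptable */
	*domain = radeon_mem_type_to_domain(ACCESS_ONCE(robj->tbo.mem.mem_type));

	return idle ? 0 : -EBUSY;
}

Nothing is reserved or waited on here, which is the point of the change: the busy query becomes a cheap, non-blocking test.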
Reviewed-by: Christian König Signed-off-by: Grigori Goronzy Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index ac3c131..7199e19 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_busy *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_radeon_bo(gobj); - r = radeon_bo_wait(robj, &cur_placement, true); + + r = reservation_object_test_signaled_rcu(robj->tbo.resv, true); + if (r == 0) + r = -EBUSY; + else + r = 0; + + cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); args->domain = radeon_mem_type_to_domain(cur_placement); drm_gem_object_unreference_unlocked(gobj); - r = radeon_gem_handle_lockup(rdev, r); return r; } -- cgit v0.10.2 From 54e03986133468e02cb01b76215e4d53a9cf6380 Mon Sep 17 00:00:00 2001 From: Grigori Goronzy Date: Fri, 3 Jul 2015 01:54:11 +0200 Subject: drm/radeon: fix HDP flushing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was regressed by commit 39e7f6f8, although I don't know of any actual issues caused by it. The storage domain is read without TTM locking now, but the lock never helped to prevent any races. Reviewed-by: Christian König Cc: stable@vger.kernel.org Signed-off-by: Grigori Goronzy Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 7199e19..013ec71 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -476,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, r = ret; /* Flush HDP cache via MMIO if necessary */ + cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); if (rdev->asic->mmio_hdp_flush && radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) robj->rdev->asic->mmio_hdp_flush(rdev); -- cgit v0.10.2 From 5e3c4f907043a7fae1b48e86536dc7b9efa07e29 Mon Sep 17 00:00:00 2001 From: Grigori Goronzy Date: Fri, 3 Jul 2015 01:54:12 +0200 Subject: drm/radeon: default to 2048 MB GART size on SI+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Newer ASICs have more VRAM on average and allocating more GART as well can have advantages. Also see commit edcd26e8. Ideally, we should scale GART size based on actual VRAM size, but that requires significant restructuring of initialization. v2: extract small helper, apply to error paths Reviewed-by: Christian König Signed-off-by: Grigori Goronzy Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 2593b11..1a532a9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg) } /** + * Determine a sensible default GART size according to ASIC family. 
+ * + * @family ASIC family name + */ +static int radeon_gart_size_auto(enum radeon_family family) +{ + /* default to a larger gart size on newer asics */ + if (family >= CHIP_TAHITI) + return 2048; + else if (family >= CHIP_RV770) + return 1024; + else + return 512; +} + +/** * radeon_check_arguments - validate module params * * @rdev: radeon_device pointer @@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev) } if (radeon_gart_size == -1) { - /* default to a larger gart size on newer asics */ - if (rdev->family >= CHIP_RV770) - radeon_gart_size = 1024; - else - radeon_gart_size = 512; + radeon_gart_size = radeon_gart_size_auto(rdev->family); } /* gtt size must be power of two and greater or equal to 32M */ if (radeon_gart_size < 32) { dev_warn(rdev->dev, "gart size (%d) too small\n", radeon_gart_size); - if (rdev->family >= CHIP_RV770) - radeon_gart_size = 1024; - else - radeon_gart_size = 512; + radeon_gart_size = radeon_gart_size_auto(rdev->family); } else if (!radeon_check_pot_argument(radeon_gart_size)) { dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", radeon_gart_size); - if (rdev->family >= CHIP_RV770) - radeon_gart_size = 1024; - else - radeon_gart_size = 512; + radeon_gart_size = radeon_gart_size_auto(rdev->family); } rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; -- cgit v0.10.2 From 866a92040392d844d151aa6a2ba15be2e6e5df4f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 3 Jul 2015 11:54:28 +0300 Subject: drm/radeon: fix underflow in r600_cp_dispatch_texture() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "if (pass_size > buf->total)" can underflow so I have changed the type of size and pass_size to unsigned to avoid this problem. Reviewed-by: Christian König Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 09e3f39..98f9ada 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev, struct drm_buf *buf; u32 *buffer; const u8 __user *data; - int size, pass_size; + unsigned int size, pass_size; u64 src_offset, dst_offset; if (!radeon_check_offset(dev_priv, tex->offset)) { -- cgit v0.10.2 From 1b42804d27b1c2623309950e9b203b11f4c67f4f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 7 Jul 2015 18:00:49 +0100 Subject: arm64: entry: handle debug exceptions in el*_inv Currently we enable debug exceptions before reading ESR_EL1 in both el0_inv and el1_inv. If a debug exception is taken before we read ESR_EL1, the value will have been corrupted. As el*_inv is typically fatal, an intervening debug exception results in misleading debug information being logged to the console, but is not otherwise harmful. As with the other entry paths, we can use the ESR_EL1 value stashed earlier in the exception entry (in x25 for el0_sync{,_compat}, and x1 for el1_sync), giving us better error reporting in this case. 
Signed-off-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a7691a3..f860bfd 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -352,8 +352,8 @@ el1_inv: // TODO: add support for undefined instructions in kernel mode enable_dbg mov x0, sp + mov x2, x1 mov x1, #BAD_SYNC - mrs x2, esr_el1 b bad_mode ENDPROC(el1_sync) @@ -553,7 +553,7 @@ el0_inv: ct_user_exit mov x0, sp mov x1, #BAD_SYNC - mrs x2, esr_el1 + mov x2, x25 bl bad_mode b ret_to_user ENDPROC(el0_sync) -- cgit v0.10.2 From 621739b00e16ca2d80411dc9b111cb15b91f3ba9 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 8 Jul 2015 16:08:24 -0400 Subject: Revert "dm: only run the queue on completion if congested or no requests pending" This reverts commit 9a0e609e3fd8a95c96629b9fbde6b8c5b9a1456a. (Resolved a conflict during revert due to commit bfebd1cdb4 that came after) This revert is motivated by a couple failure reports on request-based DM multipath testbeds: 1) Netapp reported that their multipath fault injection test under heavy IO load can stall longer than 300 seconds. 2) IBM reported elevated lock contention in their testbed (likely due to increased back pressure due to IO not being dispatched as quickly): https://www.redhat.com/archives/dm-devel/2015-July/msg00057.html Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # 4.1+ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f331d88..de70377 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) */ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) { - int nr_requests_pending; - atomic_dec(&md->pending[rw]); /* nudge anyone waiting on suspend queue */ - nr_requests_pending = md_in_flight(md); - if (!nr_requests_pending) + if (!md_in_flight(md)) wake_up(&md->wait); /* @@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) if (run_queue) { if (md->queue->mq_ops) blk_mq_run_hw_queues(md->queue, true); - else if (!nr_requests_pending || - (nr_requests_pending >= md->queue->nr_congestion_on)) + else blk_run_queue_async(md->queue); } -- cgit v0.10.2 From c867b150de8514d8682978d8e8874c3940ae781b Mon Sep 17 00:00:00 2001 From: Riku Voipio Date: Thu, 18 Jun 2015 15:52:18 +0300 Subject: tools lib: Improve clean target The clean targets miss some .cmd and .d files. Signed-off-by: Riku Voipio Cc: Peter Zijlstra Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/1434631938-12681-1-git-send-email-riku.voipio@linaro.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile index 8bd9606..fe1b02c 100644 --- a/tools/lib/api/Makefile +++ b/tools/lib/api/Makefile @@ -36,7 +36,7 @@ $(LIBFILE): $(API_IN) clean: $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \ - find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM) + find $(if $(OUTPUT),$(OUTPUT),.) 
-name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM) FORCE: diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 6daaff6..7851df1 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -268,7 +268,7 @@ install: install_lib clean: $(call QUIET_CLEAN, libtraceevent) \ - $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \ + $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \ $(RM) TRACEEVENT-CFLAGS tags TAGS PHONY += force plugins -- cgit v0.10.2 From 32f675bbc9be14a40d972820e190ac56341ce198 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 2 Jul 2015 15:57:19 +0200 Subject: net_sched: gen_estimator: extend pps limit rate estimators are limited to 4 Mpps, which was fine years ago, but too small with current hardware generation. Lets use 2^5 scaling instead of 2^10 to get 128 Mpps new limit. On 64bit arch, use an "unsigned long" for temp storage and remove limit. (We do not expect 32bit arches to be able to reach this point) Tested: tc -s -d filter sh dev eth0 parent ffff: filter protocol ip pref 1 u32 filter protocol ip pref 1 u32 fh 800: ht divisor 1 filter protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:15 match 07000000/ff000000 at 12 action order 1: gact action drop random type none pass val 0 index 1 ref 1 bind 1 installed 166 sec Action statistics: Sent 39734251496 bytes 863788076 pkt (dropped 863788117, overlimits 0 requeues 0) rate 4067Mbit 11053596pps backlog 0b 0p requeues 0 Signed-off-by: Eric Dumazet Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 9dfb88a..92d886f 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -66,7 +66,7 @@ NOTES. - * avbps is scaled by 2^5, avpps is scaled by 2^10. + * avbps and avpps are scaled by 2^5. * both values are reported as 32 bit unsigned values. bps can overflow for fast links : max speed being 34360Mbit/sec * Minimal interval is HZ/4=250msec (it is the greatest common divisor @@ -85,10 +85,10 @@ struct gen_estimator struct gnet_stats_rate_est64 *rate_est; spinlock_t *stats_lock; int ewma_log; + u32 last_packets; + unsigned long avpps; u64 last_bytes; u64 avbps; - u32 last_packets; - u32 avpps; struct rcu_head e_rcu; struct rb_node node; struct gnet_stats_basic_cpu __percpu *cpu_bstats; @@ -118,8 +118,8 @@ static void est_timer(unsigned long arg) rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { struct gnet_stats_basic_packed b = {0}; + unsigned long rate; u64 brate; - u32 rate; spin_lock(e->stats_lock); read_lock(&est_lock); @@ -133,10 +133,11 @@ static void est_timer(unsigned long arg) e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->rate_est->bps = (e->avbps+0xF)>>5; - rate = (b.packets - e->last_packets)<<(12 - idx); + rate = b.packets - e->last_packets; + rate <<= (7 - idx); e->last_packets = b.packets; e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); - e->rate_est->pps = (e->avpps+0x1FF)>>10; + e->rate_est->pps = (e->avpps + 0xF) >> 5; skip: read_unlock(&est_lock); spin_unlock(e->stats_lock); -- cgit v0.10.2 From aa43c5ff737dac5dcfccd12d23abeefbdf0579c7 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 4 Jul 2015 11:22:30 +0200 Subject: NET: hamradio: Fix IP over bpq encapsulation. Since 1d5da757da860a6916adbf68b09e868062b4b3b8 (ax25: Stop using magic neighbour cache operations.) 
any attempt to transmit IP packets over a bpqether device will result in a message like "Dead loop on virtual device bpq0, fix it urgently!" Fix suggested by Eric W. Biederman . Signed-off-by: Ralf Baechle Cc: # 4.1 Signed-off-by: David S. Miller diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 7856b6c..d95a50a 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev) memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); dev->flags = 0; + dev->features = NETIF_F_LLTX; /* Allow recursion */ #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) dev->header_ops = &ax25_header_ops; -- cgit v0.10.2 From 3d8cc14152a4b661580761049c7b9711efa60d82 Mon Sep 17 00:00:00 2001 From: Y Vo Date: Fri, 26 Jun 2015 15:01:47 +0700 Subject: arm64: dts: Add poweroff button device node for APM X-Gene platform This patch adds poweroff button device node to support poweroff feature on APM X-Gene Mustang platform. Signed-off-by: Y Vo Signed-off-by: Kevin Hilman diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts index 83578e7..4c55833 100644 --- a/arch/arm64/boot/dts/apm/apm-mustang.dts +++ b/arch/arm64/boot/dts/apm/apm-mustang.dts @@ -23,6 +23,16 @@ device_type = "memory"; reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ }; + + gpio-keys { + compatible = "gpio-keys"; + button@1 { + label = "POWER"; + linux,code = <116>; + linux,input-type = <0x1>; + interrupts = <0x0 0x2d 0x1>; + }; + }; }; &pcie0clk { -- cgit v0.10.2 From cfa520056407acaa29f3aa7aee10c929b371c7cf Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Sun, 5 Jul 2015 12:16:27 -0500 Subject: net: phy: add dependency on HAS_IOMEM to MDIO_BUS_MUX_MMIOREG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On UML builds, mdio-mux-mmioreg.c fails to compile: drivers/net/phy/mdio-mux-mmioreg.c:50:3: error: implicit declaration of function ‘ioremap’ [-Werror=implicit-function-declaration] drivers/net/phy/mdio-mux-mmioreg.c:63:3: error: implicit declaration of function ‘iounmap’ [-Werror=implicit-function-declaration] This is due to CONFIG_OF now being user selectable. Add a dependency on HAS_IOMEM to fix this. Signed-off-by: Rob Herring Cc: Florian Fainelli Cc: David S. Miller Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cf18940..cb86d7a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO config MDIO_BUS_MUX_MMIOREG tristate "Support for MMIO device-controlled MDIO bus multiplexers" - depends on OF_MDIO + depends on OF_MDIO && HAS_IOMEM select MDIO_BUS_MUX help This module provides a driver for MDIO bus multiplexers that -- cgit v0.10.2 From efc5120b8259243fa945d0028450c0a7a5a4b9ef Mon Sep 17 00:00:00 2001 From: Tirumalesh Chalamarla Date: Fri, 26 Jun 2015 12:12:23 -0700 Subject: GICv3: Add ITS entry to THUNDER dts The PCIe host controller uses MSIs provided by GICv3 ITS. Enable it on Thunder SoCs by adding an entry to DT. 
Signed-off-by: Tirumalesh Chalamarla Acked-by: Marc Zyngier Signed-off-by: Kevin Hilman diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi index d8c0bdc..9cb7cf9 100644 --- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi @@ -376,10 +376,19 @@ gic0: interrupt-controller@8010,00000000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; interrupt-controller; reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */ <0x8010 0x80000000 0x0 0x600000>; /* GICR */ interrupts = <1 9 0xf04>; + + its: gic-its@8010,00020000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x8010 0x20000 0x0 0x200000>; + }; }; uaa0: serial@87e0,24000000 { -- cgit v0.10.2 From 758556bdc1c8a8dffea0ea9f9df891878cc2468c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Jul 2015 06:48:06 +0930 Subject: module: Fix load_module() error path The load_module() error path frees a module but forgot to take it out of the mod_tree, leaving a dangling entry in the tree, causing havoc. Cc: Mathieu Desnoyers Reported-by: Arthur Marsh Tested-by: Arthur Marsh Fixes: 93c2e105f6bc ("module: Optimize __module_address() using a latched RB-tree") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Rusty Russell diff --git a/kernel/module.c b/kernel/module.c index 3e0e197..4d2b82e 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3557,6 +3557,7 @@ static int load_module(struct load_info *info, const char __user *uargs, mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); + mod_tree_remove(mod); wake_up_all(&module_wq); /* Wait for RCU-sched synchronizing before releasing mod->list. */ synchronize_sched(); -- cgit v0.10.2 From 4d44f2a0266cdcc1226c7d94431ab1d57d0f6d53 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 1 Jul 2015 13:36:01 +0100 Subject: arm: dts: vexpress: describe all PMUs in TC2 dts The dts for the CoreTile Express A15x2 A7x3 (TC2) only describes the PMUs of the Cortex-A15 CPUs, and not the Cortex-A7 CPUs. Now that we have a mechanism for describing disparate PMUs and their interrupts in device tree, this patch makes use of these to describe the PMUs for all CPUs in the system. For consistency, the existing A15 PMU interrupt-affinity property is reflowed across two lines. Signed-off-by: Mark Rutland Acked-by: Will Deacon Acked-by: Sudeep Holla Cc: Liviu Dudau Cc: Lorenzo Pieralisi Signed-off-by: Kevin Hilman diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 107395c..038e30e 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -187,11 +187,22 @@ <1 10 0xf08>; }; - pmu { + pmu_a15 { compatible = "arm,cortex-a15-pmu"; interrupts = <0 68 4>, <0 69 4>; - interrupt-affinity = <&cpu0>, <&cpu1>; + interrupt-affinity = <&cpu0>, + <&cpu1>; + }; + + pmu_a7 { + compatible = "arm,cortex-a7-pmu"; + interrupts = <0 128 4>, + <0 129 4>, + <0 130 4>; + interrupt-affinity = <&cpu2>, + <&cpu3>, + <&cpu4>; }; oscclk6a: oscclk6a { -- cgit v0.10.2 From 3adf7aaa76e720b52092e4bf8e14d8d231583af6 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 1 Jul 2015 13:36:02 +0100 Subject: arm: dts: vexpress: add missing CCI PMU device node to TC2 The CCI device node was added to vexpress CA15_A7(i.e. TC2) much before the CCI PMU support and binding was added. 
This patch adds the missing PMU node so that CCI PMUs can be used on TC2. Cc: Liviu Dudau Cc: Lorenzo Pieralisi Acked-by: Punit Agrawal Signed-off-by: Sudeep Holla Signed-off-by: Kevin Hilman diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 038e30e..17f63f7 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -150,6 +150,16 @@ interface-type = "ace"; reg = <0x5000 0x1000>; }; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r0"; + reg = <0x9000 0x5000>; + interrupts = <0 105 4>, + <0 101 4>, + <0 102 4>, + <0 103 4>, + <0 104 4>; + }; }; memory-controller@7ffd0000 { -- cgit v0.10.2 From 9ccd608070b6d4fd1a89fd76a84184fe401fb6a8 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Wed, 1 Jul 2015 13:36:03 +0100 Subject: arm64: dts: add device tree for ARM SMM-A53x2 on LogicTile Express 20MG Add a DTS file for the MP2 Cortex-A53 Soft Macrocell Model implemented on a LogicTile Express 20MG (V2F-1XV7) daughterboard. This is based on the version that's currently available from the ARM DTS repository [1]. [1] git://linux-arm.org/arm-dts.git Signed-off-by: Kristina Martsenko Acked-by: Sudeep Holla Signed-off-by: Kevin Hilman diff --git a/MAINTAINERS b/MAINTAINERS index 8133cef..3729a4a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1614,6 +1614,7 @@ M: Lorenzo Pieralisi L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/vexpress* +F: arch/arm64/boot/dts/arm/vexpress* F: arch/arm/mach-vexpress/ F: */*/vexpress* F: */*/*/vexpress* diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile index c5c98b9..bb3c072 100644 --- a/arch/arm64/boot/dts/arm/Makefile +++ b/arch/arm64/boot/dts/arm/Makefile @@ -1,6 +1,7 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb +dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts new file mode 100644 index 0000000..5b1d018 --- /dev/null +++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts @@ -0,0 +1,191 @@ +/* + * ARM Ltd. 
Versatile Express + * + * LogicTile Express 20MG + * V2F-1XV7 + * + * Cortex-A53 (2 cores) Soft Macrocell Model + * + * HBI-0247C + */ + +/dts-v1/; + +#include + +/ { + model = "V2F-1XV7 Cortex-A53x2 SMM"; + arm,hbi = <0x247>; + arm,vexpress,site = <0xf>; + compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + stdout-path = "serial0:38400n8"; + }; + + aliases { + serial0 = &v2m_serial0; + serial1 = &v2m_serial1; + serial2 = &v2m_serial2; + serial3 = &v2m_serial3; + i2c0 = &v2m_i2c_dvi; + i2c1 = &v2m_i2c_pcie; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0>; + next-level-cache = <&L2_0>; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 1>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */ + }; + + gic: interrupt-controller@2c001000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0 0x2c001000 0 0x1000>, + <0 0x2c002000 0 0x2000>, + <0 0x2c004000 0 0x2000>, + <0 0x2c006000 0 0x2000>; + interrupts = ; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = , + ; + }; + + dcc { + compatible = "arm,vexpress,config-bus"; + arm,vexpress,config-bridge = <&v2m_sysreg>; + + smbclk: osc@4 { + /* SMC clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 4>; + freq-range = <40000000 40000000>; + #clock-cells = <0>; + clock-output-names = "smclk"; + }; + + volt@0 { + /* VIO to expansion board above */ + compatible = "arm,vexpress-volt"; + arm,vexpress-sysreg,func = <2 0>; + regulator-name = "VIO_UP"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + volt@1 { + /* 12V from power connector J6 */ + compatible = "arm,vexpress-volt"; + arm,vexpress-sysreg,func = <2 1>; + regulator-name = "12"; + regulator-always-on; + }; + + temp@0 { + /* FPGA temperature */ + compatible = "arm,vexpress-temp"; + arm,vexpress-sysreg,func = <4 0>; + label = "FPGA"; + }; + }; + + smb { + compatible = "simple-bus"; + + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0 0x08000000 0x04000000>, + <1 0 0 0x14000000 0x04000000>, + <2 0 0 0x18000000 0x04000000>, + <3 0 0 0x1c000000 0x04000000>, + <4 0 0 0x0c000000 0x04000000>, + <5 0 0 0x10000000 0x04000000>; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; + interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic 
GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; + + /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi" + }; +}; -- cgit v0.10.2 From f7e2965db17dd3b60f05fad88e7afc79ea75b48f Mon Sep 17 00:00:00 2001 From: Satish Ashok Date: Mon, 6 Jul 2015 05:53:35 -0700 Subject: bridge: mdb: start delete timer for temp static entries Start the delete timer when adding temp static entries so they can expire. Signed-off-by: Satish Ashok Signed-off-by: Nikolay Aleksandrov Fixes: ccb1c31a7a87 ("bridge: add flags to distinguish permanent mdb entires") Signed-off-by: David S. Miller diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index e29ad70..3bfc675 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; struct net_bridge_mdb_htable *mdb; + unsigned long now = jiffies; int err; mdb = mlock_dereference(br->mdb, br); @@ -347,6 +348,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, if (unlikely(!p)) return -ENOMEM; rcu_assign_pointer(*pp, p); + if (state == MDB_TEMPORARY) + mod_timer(&p->timer, now + br->multicast_membership_interval); br_mdb_notify(br->dev, port, group, RTM_NEWMDB); return 0; -- cgit v0.10.2 From 142b942a75cb10ede1b42bf85368d41449ab4e3b Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Mon, 6 Jul 2015 15:51:20 +0200 Subject: rhashtable: fix for resize events during table walk If rhashtable_walk_next detects a resize operation in progress, it jumps to the new table and continues walking that one. But it misses to drop the reference to it's current item, leading it to continue traversing the new table's bucket in which the current item is sorted into, and after reaching that bucket's end continues traversing the new table's second bucket instead of the first one, thereby potentially missing items. This fixes the rhashtable runtime test for me. Bug probably introduced by Herbert Xu's patch eddee5ba ("rhashtable: Fix walker behaviour during rehash") although not explicitly tested. Fixes: eddee5ba ("rhashtable: Fix walker behaviour during rehash") Signed-off-by: Phil Sutter Acked-by: Herbert Xu Signed-off-by: David S. 
Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a60a6d3..cc0c697 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -610,6 +610,8 @@ next: iter->skip = 0; } + iter->p = NULL; + /* Ensure we see any new tables. */ smp_rmb(); @@ -620,8 +622,6 @@ next: return ERR_PTR(-EAGAIN); } - iter->p = NULL; - return NULL; } EXPORT_SYMBOL_GPL(rhashtable_walk_next); -- cgit v0.10.2 From d339727c2b1a10f25e6636670ab6e1841170e328 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 6 Jul 2015 17:13:26 +0200 Subject: net: graceful exit from netif_alloc_netdev_queues() User space can crash kernel with ip link add ifb10 numtxqueues 100000 type ifb We must replace a BUG_ON() by proper test and return -EINVAL for crazy values. Fixes: 60877a32bce00 ("net: allow large number of tx queues") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 6778a99..0ad6262 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev) struct netdev_queue *tx; size_t sz = count * sizeof(*tx); - BUG_ON(count < 1 || count > 0xffff); + if (count < 1 || count > 0xffff) + return -EINVAL; tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!tx) { -- cgit v0.10.2 From 95ec655bc465ccb2a3329d4aff9a45e3c8188db5 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 6 Jul 2015 17:25:10 +0200 Subject: Revert "dev: set iflink to 0 for virtual interfaces" This reverts commit e1622baf54df8cc958bf29d71de5ad545ea7d93c. The side effect of this commit is to add a '@NONE' after each virtual interface name with a 'ip link'. It may break existing scripts. Reported-by: Olivier Hartkopp Signed-off-by: Nicolas Dichtel Tested-by: Oliver Hartkopp Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 0ad6262..1e57efd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev) if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) return dev->netdev_ops->ndo_get_iflink(dev); - /* If dev->rtnl_link_ops is set, it's a virtual interface. */ - if (dev->rtnl_link_ops) - return 0; - return dev->ifindex; } EXPORT_SYMBOL(dev_get_iflink); -- cgit v0.10.2 From 673c2c34f684e9d4328459e426ab54d51a5865c5 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Wed, 8 Jul 2015 17:07:41 -0400 Subject: modpost: work correctly with tile coldtext sections The tilegx and tilepro compilers use .coldtext for their unlikely executed text section name, so an __attribute__((cold)) function will (when compiled with higher optimization levels) land in the .coldtext section. Modify modpost to add .coldtext to the set of OTHER_TEXT_SECTIONS so we don't get warnings about referencing such a section in an __ex_table block, and then also modify arch/tile/lib/memcpy_user_64.c so that it uses plain ".coldtext" instead of ".coldtext.memcpy". The latter naming is a relic of an earlier use of -ffunction-sections, which we no longer use by default. 
Signed-off-by: Chris Metcalf Acked-by: Rusty Russell diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c index 88c7016..97bbb60 100644 --- a/arch/tile/lib/memcpy_user_64.c +++ b/arch/tile/lib/memcpy_user_64.c @@ -28,7 +28,7 @@ #define _ST(p, inst, v) \ ({ \ asm("1: " #inst " %0, %1;" \ - ".pushsection .coldtext.memcpy,\"ax\";" \ + ".pushsection .coldtext,\"ax\";" \ "2: { move r0, %2; jrp lr };" \ ".section __ex_table,\"a\";" \ ".align 8;" \ @@ -41,7 +41,7 @@ ({ \ unsigned long __v; \ asm("1: " #inst " %0, %1;" \ - ".pushsection .coldtext.memcpy,\"ax\";" \ + ".pushsection .coldtext,\"ax\";" \ "2: { move r0, %2; jrp lr };" \ ".section __ex_table,\"a\";" \ ".align 8;" \ diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 91ee1b2..12d3db3 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -886,7 +886,8 @@ static void check_section(const char *modname, struct elf_info *elf, #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ ".kprobes.text" #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ - ".fixup", ".entry.text", ".exception.text", ".text.*" + ".fixup", ".entry.text", ".exception.text", ".text.*", \ + ".coldtext" #define INIT_SECTIONS ".init.*" #define MEM_INIT_SECTIONS ".meminit.*" -- cgit v0.10.2 From cfbfd86bfde15020bccde377e11586ee5c8b701d Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Mon, 6 Jul 2015 11:57:37 -0500 Subject: amd-xgbe: Fix DMA API debug warning When running a kernel configured with CONFIG_DMA_API_DEBUG=y a warning is issued: DMA-API: device driver tries to sync DMA memory it has not allocated This warning is the result of mapping the full range of the Rx buffer pages allocated and then performing a dma_sync_single_for_cpu against a calculated DMA address. The proper thing to do is to use the dma_sync_single_range_for_cpu with a base DMA address and an offset. Reported-by: Kim Phillips Signed-off-by: Tom Lendacky Tested-by: Kim Phillips Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 661cdaa..b3bc87f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, get_page(pa->pages); bd->pa = *pa; - bd->dma = pa->pages_dma + pa->pages_offset; + bd->dma_base = pa->pages_dma; + bd->dma_off = pa->pages_offset; bd->dma_len = len; pa->pages_offset += len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 506e832..a4473d8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, unsigned int rx_usecs = pdata->rx_usecs; unsigned int rx_frames = pdata->rx_frames; unsigned int inte; + dma_addr_t hdr_dma, buf_dma; if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */ @@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, * Set buffer 2 (hi) address to buffer dma address (hi) and * set control bits OWN and INTE */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); - rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); - rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); + hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; + buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; + rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); + rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); + rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1e9c28d..aae9d5e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, /* Start with the header buffer which may contain just the header * or the header plus data */ - dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, - rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, + rdata->rx.hdr.dma_off, + rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); packet = page_address(rdata->rx.hdr.pa.pages) + rdata->rx.hdr.pa.pages_offset; @@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, len -= copy_len; if (len) { /* Add the remaining data as a frag */ - dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, - rdata->rx.buf.dma_len, DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(pdata->dev, + rdata->rx.buf.dma_base, + rdata->rx.buf.dma_off, + rdata->rx.buf.dma_len, + DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rdata->rx.buf.pa.pages, @@ -1945,8 +1949,9 @@ read_again: if (!skb) error = 1; } else if (rdesc_len) { - dma_sync_single_for_cpu(pdata->dev, - rdata->rx.buf.dma, + dma_sync_single_range_for_cpu(pdata->dev, + rdata->rx.buf.dma_base, + rdata->rx.buf.dma_off, rdata->rx.buf.dma_len, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 63d72a1..717ce21 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -337,7 +337,8 @@ struct 
xgbe_buffer_data { struct xgbe_page_alloc pa; struct xgbe_page_alloc pa_unmap; - dma_addr_t dma; + dma_addr_t dma_base; + unsigned long dma_off; unsigned int dma_len; }; -- cgit v0.10.2 From 6c3e921b18edca290099adfddde8a50236bf2d80 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 6 Jul 2015 20:34:55 +0200 Subject: net: fec: Ensure clocks are enabled while using mdio bus When a switch is attached to the mdio bus, the mdio bus can be used while the interface is not open. If the IPG clock is not enabled, MDIO reads/writes will simply time out. Add support for runtime PM to control this clock. Enable/disable this clock using runtime PM, with open()/close() and mdio read()/write() function triggering runtime PM operations. Since PM is optional, the IPG clock is enabled at probe and is no longer modified by fec_enet_clk_enable(), thus if PM is not enabled in the kernel, it is guaranteed the clock is running when MDIO operations are performed. Signed-off-by: Andrew Lunn Acked-by: Fugang Duan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1f89c59..42e20e5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 +#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto out; } - /* return value */ - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; } - return 0; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1852,7 
+1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1874,8 +1893,6 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; + ret = pm_runtime_get_sync(&fep->pdev->dev); + if (IS_ERR_VALUE(ret)) + return ret; + pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - return ret; + goto clk_enable; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. @@ -2881,6 +2902,9 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); +clk_enable: + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + fec_enet_free_buffers(ndev); return 0; @@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3434,6 +3465,8 @@ fec_probe(struct platform_device *pdev) netif_carrier_off(ndev); fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); ret = register_netdev(ndev); if (ret) @@ -3447,6 +3480,12 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; failed_register: @@ -3457,6 +3496,8 @@ failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3568,7 +3609,28 @@ failed_clk: return ret; } -static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); +static int __maybe_unused fec_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + clk_disable_unprepare(fep->clk_ipg); + + return 0; +} + +static int __maybe_unused fec_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + return clk_prepare_enable(fep->clk_ipg); +} + +static const struct dev_pm_ops fec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) +}; static struct platform_driver fec_driver = { .driver = { -- cgit v0.10.2 From 4f7d2cdfdde71ffe962399b7020c674050329423 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 7 
Jul 2015 00:07:52 +0200 Subject: rtnetlink: verify IFLA_VF_INFO attributes before passing them to driver Jason Gunthorpe reported that since commit c02db8c6290b ("rtnetlink: make SR-IOV VF interface symmetric"), we don't verify IFLA_VF_INFO attributes anymore with respect to their policy, that is, ifla_vfinfo_policy[]. Before, they were part of ifla_policy[], but they have been nested since placed under IFLA_VFINFO_LIST, that contains the attribute IFLA_VF_INFO, which is another nested attribute for the actual VF attributes such as IFLA_VF_MAC, IFLA_VF_VLAN, etc. Despite the policy being split out from ifla_policy[] in this commit, it's never applied anywhere. nla_for_each_nested() only does basic nla_ok() testing for struct nlattr, but it doesn't know about the data context and their requirements. Fix, on top of Jason's initial work, does 1) parsing of the attributes with the right policy, and 2) using the resulting parsed attribute table from 1) instead of the nla_for_each_nested() loop (just like we used to do when still part of ifla_policy[]). Reference: http://thread.gmane.org/gmane.linux.network/368913 Fixes: c02db8c6290b ("rtnetlink: make SR-IOV VF interface symmetric") Reported-by: Jason Gunthorpe Cc: Chris Wright Cc: Sucheta Chakraborty Cc: Greg Rose Cc: Jeff Kirsher Cc: Rony Efraim Cc: Vlad Zolotarov Cc: Nicolas Dichtel Cc: Thomas Graf Signed-off-by: Jason Gunthorpe Signed-off-by: Daniel Borkmann Acked-by: Vlad Zolotarov Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 01ced4a..9e433d5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, }; -static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { - [IFLA_VF_INFO] = { .type = NLA_NESTED }, -}; - static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, @@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) return 0; } -static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) +static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) { - int rem, err = -EINVAL; - struct nlattr *vf; const struct net_device_ops *ops = dev->netdev_ops; + int err = -EINVAL; - nla_for_each_nested(vf, attr, rem) { - switch (nla_type(vf)) { - case IFLA_VF_MAC: { - struct ifla_vf_mac *ivm; - ivm = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_mac) - err = ops->ndo_set_vf_mac(dev, ivm->vf, - ivm->mac); - break; - } - case IFLA_VF_VLAN: { - struct ifla_vf_vlan *ivv; - ivv = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_vlan) - err = ops->ndo_set_vf_vlan(dev, ivv->vf, - ivv->vlan, - ivv->qos); - break; - } - case IFLA_VF_TX_RATE: { - struct ifla_vf_tx_rate *ivt; - struct ifla_vf_info ivf; - ivt = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_get_vf_config) - err = ops->ndo_get_vf_config(dev, ivt->vf, - &ivf); - if (err) - break; - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rate) - err = ops->ndo_set_vf_rate(dev, ivt->vf, - ivf.min_tx_rate, - ivt->rate); - break; - } - case IFLA_VF_RATE: { - struct ifla_vf_rate *ivt; - ivt = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rate) - err = ops->ndo_set_vf_rate(dev, ivt->vf, - ivt->min_tx_rate, - ivt->max_tx_rate); - break; - } - case IFLA_VF_SPOOFCHK: { - struct ifla_vf_spoofchk *ivs; - ivs = 
nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_spoofchk) - err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, - ivs->setting); - break; - } - case IFLA_VF_LINK_STATE: { - struct ifla_vf_link_state *ivl; - ivl = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_link_state) - err = ops->ndo_set_vf_link_state(dev, ivl->vf, - ivl->link_state); - break; - } - case IFLA_VF_RSS_QUERY_EN: { - struct ifla_vf_rss_query_en *ivrssq_en; + if (tb[IFLA_VF_MAC]) { + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); - ivrssq_en = nla_data(vf); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_rss_query_en) - err = ops->ndo_set_vf_rss_query_en(dev, - ivrssq_en->vf, - ivrssq_en->setting); - break; - } - default: - err = -EINVAL; - break; - } - if (err) - break; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_mac) + err = ops->ndo_set_vf_mac(dev, ivm->vf, + ivm->mac); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_VLAN]) { + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_vlan) + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, + ivv->qos); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_TX_RATE]) { + struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); + struct ifla_vf_info ivf; + + err = -EOPNOTSUPP; + if (ops->ndo_get_vf_config) + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); + if (err < 0) + return err; + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivf.min_tx_rate, + ivt->rate); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_RATE]) { + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivt->min_tx_rate, + ivt->max_tx_rate); + if (err < 0) + return err; } + + if (tb[IFLA_VF_SPOOFCHK]) { + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_spoofchk) + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, + ivs->setting); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_LINK_STATE]) { + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_link_state) + err = ops->ndo_set_vf_link_state(dev, ivl->vf, + ivl->link_state); + if (err < 0) + return err; + } + + if (tb[IFLA_VF_RSS_QUERY_EN]) { + struct ifla_vf_rss_query_en *ivrssq_en; + + err = -EOPNOTSUPP; + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); + if (ops->ndo_set_vf_rss_query_en) + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, + ivrssq_en->setting); + if (err < 0) + return err; + } + return err; } @@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb, } if (tb[IFLA_VFINFO_LIST]) { + struct nlattr *vfinfo[IFLA_VF_MAX + 1]; struct nlattr *attr; int rem; + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { - if (nla_type(attr) != IFLA_VF_INFO) { + if (nla_type(attr) != IFLA_VF_INFO || + nla_len(attr) < NLA_HDRLEN) { err = -EINVAL; goto errout; } - err = do_setvfinfo(dev, attr); + err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, + ifla_vf_policy); + if (err < 0) + goto errout; + err = do_setvfinfo(dev, vfinfo); if (err < 0) goto errout; status |= DO_SETLINK_NOTIFY; -- cgit v0.10.2 From fc24f2b2094366da8786f59f2606307e934cea17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20Ter=C3=A4s?= Date: Tue, 7 Jul 2015 08:34:13 +0300 Subject: ip_tunnel: fix ipv4 pmtu check to honor inner ip header df MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Frag needed 
should be sent only if the inner header asked to not fragment. Currently fragmentation is broken if the tunnel has df set, but df was not asked in the original packet. The tunnel's df needs to be still checked to update internally the pmtu cache. Commit 23a3647bc4f93bac broke it, and this commit fixes the ipv4 df check back to the way it was. Fixes: 23a3647bc4f93bac ("ip_tunnels: Use skb-len to PMTU check.") Cc: Pravin B Shelar Signed-off-by: Timo Teräs Acked-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 4c2c3ba..626d9e5 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, EXPORT_SYMBOL(ip_tunnel_encap); static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, - struct rtable *rt, __be16 df) + struct rtable *rt, __be16 df, + const struct iphdr *inner_iph) { struct ip_tunnel *tunnel = netdev_priv(dev); int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; @@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IP)) { if (!skb_is_gso(skb) && - (df & htons(IP_DF)) && mtu < pkt_size) { + (inner_iph->frag_off & htons(IP_DF)) && + mtu < pkt_size) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); return -E2BIG; @@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ip_rt_put(rt); goto tx_error; } -- cgit v0.10.2 From b5a983f3141239b5b0b4a4e66efa31f8a26354b8 Mon Sep 17 00:00:00 2001 From: Mazhar Rana Date: Tue, 7 Jul 2015 15:04:50 +0530 Subject: bonding: "primary_reselect" with "failure" is not working properly When "primary_reselect" is set to "failure", primary interface should not become active until current active slave is down. But if we set first member of bond device as a "primary" interface and "primary_reselect" is set to "failure" then whenever primary interface's link get back(up) it become active slave even if current active slave is still up. With this patch, "bond_find_best_slave" will not traverse members if primary interface is not candidate for failover/reselection and current active slave is still up. Signed-off-by: Mazhar Rana Signed-off-by: Jay Vosburgh Signed-off-by: Jay Vosburgh Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 19eb990..317a494 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -689,40 +689,57 @@ out: } -static bool bond_should_change_active(struct bonding *bond) +static struct slave *bond_choose_primary_or_current(struct bonding *bond) { struct slave *prim = rtnl_dereference(bond->primary_slave); struct slave *curr = rtnl_dereference(bond->curr_active_slave); - if (!prim || !curr || curr->link != BOND_LINK_UP) - return true; + if (!prim || prim->link != BOND_LINK_UP) { + if (!curr || curr->link != BOND_LINK_UP) + return NULL; + return curr; + } + if (bond->force_primary) { bond->force_primary = false; - return true; + return prim; + } + + if (!curr || curr->link != BOND_LINK_UP) + return prim; + + /* At this point, prim and curr are both up */ + switch (bond->params.primary_reselect) { + case BOND_PRI_RESELECT_ALWAYS: + return prim; + case BOND_PRI_RESELECT_BETTER: + if (prim->speed < curr->speed) + return curr; + if (prim->speed == curr->speed && prim->duplex <= curr->duplex) + return curr; + return prim; + case BOND_PRI_RESELECT_FAILURE: + return curr; + default: + netdev_err(bond->dev, "impossible primary_reselect %d\n", + bond->params.primary_reselect); + return curr; } - if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER && - (prim->speed < curr->speed || - (prim->speed == curr->speed && prim->duplex <= curr->duplex))) - return false; - if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE) - return false; - return true; } /** - * find_best_interface - select the best available slave to be the active one + * bond_find_best_slave - select the best available slave to be the active one * @bond: our bonding struct */ static struct slave *bond_find_best_slave(struct bonding *bond) { - struct slave *slave, *bestslave = NULL, *primary; + struct slave *slave, *bestslave = NULL; struct list_head *iter; int mintime = bond->params.updelay; - primary = rtnl_dereference(bond->primary_slave); - if (primary && primary->link == BOND_LINK_UP && - bond_should_change_active(bond)) - return primary; + slave = bond_choose_primary_or_current(bond); + if (slave) + return slave; bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) -- cgit v0.10.2 From 9e9f665a18008999e749bd41394efcbf5c37a726 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 7 Jul 2015 11:37:00 +0100 Subject: sfc: refactor code in efx_ef10_set_mac_address() Re-organize the structure of error handling to avoid having to duplicate the netif_err() around the ifdefs. The only change to the behaviour of the error-handling is that the PF's data structure to record VF details should only be updated if the original command succeeded. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 8476434..9740cd0 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3829,38 +3829,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); netif_device_attach(efx->net_dev); -#if !defined(CONFIG_SFC_SRIOV) - if (rc == -EPERM) - netif_err(efx, drv, efx->net_dev, - "Cannot change MAC address; use sfboot to enable mac-spoofing" - " on this interface\n"); -#else - if (rc == -EPERM) { +#ifdef CONFIG_SFC_SRIOV + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - /* Switch to PF and change MAC address on vport */ - if (efx->pci_dev->is_virtfn && pci_dev_pf) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + if (rc == -EPERM) { + struct efx_nic *efx_pf; - if (!efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr)) - return 0; - } - netif_err(efx, drv, efx->net_dev, - "Cannot change MAC address; use sfboot to enable mac-spoofing" - " on this interface\n"); - } else if (efx->pci_dev->is_virtfn) { - /* Successfully changed by VF (with MAC spoofing), so update the - * parent PF if possible. - */ - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + /* Switch to PF and change MAC address on vport */ + efx_pf = pci_get_drvdata(pci_dev_pf); - if (pci_dev_pf) { + rc = efx_ef10_sriov_set_vf_mac(efx_pf, + nic_data->vf_index, + efx->net_dev->dev_addr); + } else if (!rc) { struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; unsigned int i; + /* MAC address successfully changed by VF (with MAC + * spoofing) so update the parent PF if possible. + */ for (i = 0; i < efx_pf->vf_count; ++i) { struct ef10_vf *vf = nic_data->vf + i; @@ -3871,8 +3860,14 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) } } } - } + } else #endif + if (rc == -EPERM) { + netif_err(efx, drv, efx->net_dev, + "Cannot change MAC address; use sfboot to enable" + " mac-spoofing on this interface\n"); + } + return rc; } -- cgit v0.10.2 From 7a186f4703de6f4b7feb5d09735b096145b8918c Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 7 Jul 2015 11:37:19 +0100 Subject: sfc: add legacy method for changing a PF's MAC address Some versions of MCFW do not support the MC_CMD_VADAPTOR_SET_MAC command, and ENOSYS will be returned. If the PF created its own vport, the function's datapath must be stopped and the vport can be reconfigured to reflect the new MAC address. If the MCFW created the vport for the PF (which is the case when the nic_data->vport_mac is blank), nothing further needs to be done as the vport is not under the control of the PF. This only applies to PFs because the MCFW in question does not support VFs. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 9740cd0..e0cb361 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) return resource_size(&efx->pci_dev->resource[bar]); } +static bool efx_ef10_is_vf(struct efx_nic *efx) +{ + return efx->type->is_vf; +} + static int efx_ef10_get_pf_index(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); @@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx) return efx_ef10_probe(efx); } +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vport_add_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + #ifdef CONFIG_SFC_SRIOV static int efx_ef10_probe_vf(struct efx_nic *efx) { @@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) WARN_ON(remove_failed); } +static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac_old[ETH_ALEN]; + int rc, rc2; + + /* Only reconfigure a PF-created vport */ + if (is_zero_ether_addr(nic_data->vport_mac)) + return 0; + + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + down_write(&efx->filter_sem); + efx_ef10_filter_table_remove(efx); + up_write(&efx->filter_sem); + + rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); + if (rc) + goto restore_filters; + + ether_addr_copy(mac_old, nic_data->vport_mac); + rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, + nic_data->vport_mac); + if (rc) + goto restore_vadaptor; + + rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, + efx->net_dev->dev_addr); + if (!rc) { + ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); + } else { + rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); + if (rc2) { + /* Failed to add original MAC, so clear vport_mac */ + eth_zero_addr(nic_data->vport_mac); + goto reset_nic; + } + } + +restore_vadaptor: + rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc2) + goto reset_nic; +restore_filters: + down_write(&efx->filter_sem); + rc2 = efx_ef10_filter_table_probe(efx); + up_write(&efx->filter_sem); + if (rc2) + goto reset_nic; + + rc2 = 
efx_net_open(efx->net_dev); + if (rc2) + goto reset_nic; + + netif_device_attach(efx->net_dev); + + return rc; + +reset_nic: + netif_err(efx, drv, efx->net_dev, + "Failed to restore when changing MAC address - scheduling reset\n"); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); + + return rc ? rc : rc2; +} + static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); @@ -3866,6 +3979,13 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" " mac-spoofing on this interface\n"); + } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { + /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC + * fall-back to the method of changing the MAC address on the + * vport. This only applies to PFs because such versions of + * MCFW do not support VFs. + */ + rc = efx_ef10_vport_set_mac_address(efx); } return rc; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 6c9b6e4..7485f71 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id, NULL, 0, NULL); } -static int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); - - MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); - ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); - - return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, - sizeof(inbuf), NULL, 0, NULL); -} - -static int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); - - MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); - ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); - - return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, - sizeof(inbuf), NULL, 0, NULL); -} - static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, unsigned int vswitch_type) { @@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id) NULL, 0, NULL); } -static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); - - MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), - NULL, 0, NULL); -} - -static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); - - MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), - NULL, 0, NULL); -} - static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index db4ef53..6d25b92 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); +int efx_ef10_vport_add_mac(struct efx_nic *efx, + 
unsigned int port_id, u8 *mac); +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac); +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); #endif /* EF10_SRIOV_H */ -- cgit v0.10.2 From 535a61777f44eebcb71f2a6866f95b17ee0f13c7 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 7 Jul 2015 11:37:33 +0100 Subject: sfc: suppress handled MCDI failures when changing the MAC address Signed-off-by: Shradha Shah Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e0cb361..605cc89 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3933,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx->net_dev->dev_addr); MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, nic_data->vport_id); - rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, - sizeof(inbuf), NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, + sizeof(inbuf), NULL, 0, NULL); efx_ef10_filter_table_probe(efx); up_write(&efx->filter_sem); @@ -3986,6 +3986,9 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); + } else { + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, + sizeof(inbuf), NULL, 0, rc); } return rc; -- cgit v0.10.2 From fdd75ea8df370f206a8163786e7470c1277a5064 Mon Sep 17 00:00:00 2001 From: Stephen Smalley Date: Tue, 7 Jul 2015 09:43:45 -0400 Subject: net/tipc: initialize security state for new connection socket Calling connect() with an AF_TIPC socket would trigger a series of error messages from SELinux along the lines of: SELinux: Invalid class 0 type=AVC msg=audit(1434126658.487:34500): avc: denied { } for pid=292 comm="kworker/u16:5" scontext=system_u:system_r:kernel_t:s0 tcontext=system_u:object_r:unlabeled_t:s0 tclass= permissive=0 This was due to a failure to initialize the security state of the new connection sock by the tipc code, leaving it with junk in the security class field and an unlabeled secid. Add a call to security_sk_clone() to inherit the security state from the parent socket. Reported-by: Tim Shearer Signed-off-by: Stephen Smalley Acked-by: Paul Moore Acked-by: Ying Xue Signed-off-by: David S. Miller diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 46b6ed5..3a7567f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); if (res) goto exit; + security_sk_clone(sock->sk, new_sock->sk); new_sk = new_sock->sk; new_tsock = tipc_sk(new_sk); -- cgit v0.10.2 From 1973db0df7c3bd69de2a1041d3364567287771d9 Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Tue, 7 Jul 2015 18:30:39 +0530 Subject: drivers: net: cpsw: fix crash while accessing second slave ethernet interface When cpsw's number of slave is set to 1 in device tree and while accessing second slave ndev and priv in cpsw_tx_interrupt(), there is a kernel crash. This is due to cpsw_get_slave_priv() not verifying number of slaves while retriving netdev priv and returns a invalid memory region. Fixing the issue by introducing number of slave check in cpsw_get_slave_priv() and cpsw_get_slave_ndev(). 
[ 15.879589] Unable to handle kernel paging request at virtual address 0f0e142c [ 15.888540] pgd = ed374000 [ 15.891359] [0f0e142c] *pgd=00000000 [ 15.895105] Internal error: Oops: 5 [#1] SMP ARM [ 15.899936] Modules linked in: [ 15.903139] CPU: 0 PID: 593 Comm: udhcpc Tainted: G W 4.1.0-12205-gfda8b18-dirty #10 [ 15.912386] Hardware name: Generic AM43 (Flattened Device Tree) [ 15.918557] task: ed2a2e00 ti: ed3fe000 task.ti: ed3fe000 [ 15.924187] PC is at cpsw_tx_interrupt+0x30/0x44 [ 15.929008] LR is at _raw_spin_unlock_irqrestore+0x40/0x44 [ 15.934726] pc : [] lr : [] psr: 20000193 [ 15.934726] sp : ed3ffc08 ip : ed2a2e40 fp : 00000000 [ 15.946685] r10: c0969ce8 r9 : c0969cfc r8 : 00000000 [ 15.952129] r7 : 000000c6 r6 : ee54ab00 r5 : ee169c64 r4 : ee534e00 [ 15.958932] r3 : 0f0e0d0c r2 : 00000000 r1 : ed3ffbc0 r0 : 00000001 [ 15.965735] Flags: nzCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment user [ 15.973261] Control: 10c5387d Table: ad374059 DAC: 00000015 [ 15.979246] Process udhcpc (pid: 593, stack limit = 0xed3fe218) [ 15.985414] Stack: (0xed3ffc08 to 0xed400000) [ 15.989954] fc00: ee54ab00 c009928c c0a9e648 60000193 000032e4 ee169c00 [ 15.998478] fc20: ee169c64 ee169c00 ee169c64 ee54ab00 00000001 00000001 ee67e268 ee008800 [ 16.006995] fc40: ee534800 c009946c ee169c00 ee169c64 c08bd660 c009c370 c009c2a4 000000c6 [ 16.015513] fc60: c08b75c4 c08b0854 00000000 c0098b3c 000000c6 c0098c50 ed3ffcb0 0000003a [ 16.024033] fc80: ed3ffcb0 fa24010c c08b7800 fa240100 ee7e9880 c00094c4 c05ef4e8 60000013 [ 16.032556] fca0: ffffffff ed3ffce4 ee7e9880 c05ef964 00000001 ed2a33d8 00000000 ed2a2e00 [ 16.041080] fcc0: 60000013 ee536bf8 60000013 ee51b800 ee7e9880 ee67e268 ee7e9880 ee534800 [ 16.049603] fce0: c0ad0768 ed3ffcf8 c008e910 c05ef4e8 60000013 ffffffff 00000001 00000001 [ 16.058121] fd00: ee536bf8 c0487a04 00000000 00000000 ee534800 00000000 00000156 c048c990 [ 16.066645] fd20: 00000000 00000000 c0969f40 00000000 00000000 c05000e8 00000001 00000000 [ 16.075167] fd40: 00000000 c051eefc 00000000 ee67e268 00000000 00000000 ee51b800 ed3ffd9c [ 16.083690] fd60: 00000000 ee67e200 ee51b800 ee7e9880 ee67e268 00000000 00000000 ee67e200 [ 16.092211] fd80: ee51b800 ee7e9880 ee67e268 ee534800 ee67e200 c051eedc ee67e268 00000010 [ 16.100727] fda0: 00000000 00000000 ee7e9880 ee534800 00000000 ee67e268 ee51b800 c05006fc [ 16.109247] fdc0: ee67e268 00000001 c0500488 00000156 ee7e9880 00000000 ed3fe000 fffffff4 [ 16.117771] fde0: ed3fff1c ee7e9880 ee534800 00000148 00000000 ed1f8340 00000000 00000000 [ 16.126289] fe00: 00000000 c05a9054 00000000 00000000 00000156 c0ab62a8 00000010 ed3e7000 [ 16.134812] fe20: 00000000 00000008 edcfb700 ed3fff1c c0fb5f94 ed2a2e00 c0fb5f64 000005d8 [ 16.143336] fe40: c0a9b3b8 00000000 ed3e7070 00000000 00000000 00000000 00009f40 00000000 [ 16.151858] fe60: 00000000 00020022 00110008 00000000 00000000 43004400 00000000 ffffffff [ 16.160374] fe80: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 [ 16.168898] fea0: edcfb700 bee5f380 00000014 00000000 ed3fe000 00000000 00004400 c04e2b64 [ 16.177415] fec0: 00000002 c04e3b00 ed3ffeec 00000001 0000011a 00000000 00000000 bee5f394 [ 16.185937] fee0: 00000148 ed3fff10 00000014 00000001 00000000 00000000 ed3ffee4 00000000 [ 16.194459] ff00: 00000000 00000000 00000000 c04e3664 00080011 00000002 06000000 ffffffff [ 16.202980] ff20: 0000ffff ffffffff 0000ffff c008dd54 ee5a6f08 ee636e80 c096972d c0089c14 [ 16.211499] ff40: 00000000 60000013 ee5a6f40 60000013 00000000 ee5a6f40 00000002 00000006 [ 16.220023] ff60: 00000000 
edcfb700 00000001 ed2a2e00 c000f60c 00000001 0000011a c008ea34 [ 16.228540] ff80: 00000006 00000000 bee5f380 00000014 bee5f380 00000014 bee5f380 00000122 [ 16.237059] ffa0: c000f7c4 c000f5e0 bee5f380 00000014 00000006 bee5f394 00000148 00000000 [ 16.245581] ffc0: bee5f380 00000014 bee5f380 00000122 fffffd6e 00004300 00004800 00004400 [ 16.254104] ffe0: bee5f378 bee5f36c 000307ec b6f39044 40000010 00000006 ed36fa40 00000000 [ 16.262642] [] (cpsw_tx_interrupt) from [] (handle_irq_event_percpu+0x64/0x204) [ 16.272076] [] (handle_irq_event_percpu) from [] (handle_irq_event+0x40/0x64) [ 16.281330] [] (handle_irq_event) from [] (handle_fasteoi_irq+0xcc/0x1a8) [ 16.290220] [] (handle_fasteoi_irq) from [] (generic_handle_irq+0x20/0x30) [ 16.299197] [] (generic_handle_irq) from [] (__handle_domain_irq+0x64/0xdc) [ 16.308273] [] (__handle_domain_irq) from [] (gic_handle_irq+0x20/0x60) [ 16.316987] [] (gic_handle_irq) from [] (__irq_svc+0x44/0x5c) [ 16.324779] Exception stack(0xed3ffcb0 to 0xed3ffcf8) [ 16.330044] fca0: 00000001 ed2a33d8 00000000 ed2a2e00 [ 16.338567] fcc0: 60000013 ee536bf8 60000013 ee51b800 ee7e9880 ee67e268 ee7e9880 ee534800 [ 16.347090] fce0: c0ad0768 ed3ffcf8 c008e910 c05ef4e8 60000013 ffffffff [ 16.353987] [] (__irq_svc) from [] (_raw_spin_unlock_irqrestore+0x34/0x44) [ 16.362973] [] (_raw_spin_unlock_irqrestore) from [] (cpdma_check_free_tx_desc+0x60/0x6c) [ 16.373311] [] (cpdma_check_free_tx_desc) from [] (cpsw_ndo_start_xmit+0xb4/0x1ac) [ 16.383017] [] (cpsw_ndo_start_xmit) from [] (dev_hard_start_xmit+0x2a4/0x4c0) [ 16.392364] [] (dev_hard_start_xmit) from [] (sch_direct_xmit+0xf4/0x210) [ 16.401246] [] (sch_direct_xmit) from [] (__dev_queue_xmit+0x2ac/0x7bc) [ 16.409960] [] (__dev_queue_xmit) from [] (packet_sendmsg+0xc68/0xeb4) [ 16.418585] [] (packet_sendmsg) from [] (sock_sendmsg+0x14/0x24) [ 16.426663] [] (sock_sendmsg) from [] (SyS_sendto+0xb4/0xe0) [ 16.434377] [] (SyS_sendto) from [] (ret_fast_syscall+0x0/0x54) [ 16.442360] Code: e5943118 e593303c e3530000 0a000002 (e5930720) [ 16.448716] ---[ end trace a68159f094d85ba6 ]--- [ 16.453526] Kernel panic - not syncing: Fatal exception in interrupt [ 16.460149] ---[ end Kernel panic - not syncing: Fatal exception in interrupt Signed-off-by: Mugunthan V N Cc: # v3.8+ Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4628205..e778703 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -509,9 +509,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { (func)(slave++, ##arg); \ } while (0) #define cpsw_get_slave_ndev(priv, __slave_no__) \ - (priv->slaves[__slave_no__].ndev) + ((__slave_no__ < priv->data.slaves) ? \ + priv->slaves[__slave_no__].ndev : NULL) #define cpsw_get_slave_priv(priv, __slave_no__) \ - ((priv->slaves[__slave_no__].ndev) ? \ + (((__slave_no__ < priv->data.slaves) && \ + (priv->slaves[__slave_no__].ndev)) ? 
\ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ -- cgit v0.10.2 From f1158b74e54f2e2462ba5e2f45a118246d9d5b43 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 7 Jul 2015 15:55:56 +0200 Subject: bridge: mdb: zero out the local br_ip variable before use Since commit b0e9a30dd669 ("bridge: Add vlan id to multicast groups") there's a check in br_ip_equal() for a matching vlan id, but the mdb functions were not modified to use (or at least zero it) so when an entry was added it would have a garbage vlan id (from the local br_ip variable in __br_mdb_add/del) and this would prevent it from being matched and also deleted. So zero out the whole local ip var to protect ourselves from future changes and also to fix the current bug, since there's no vlan id support in the mdb uapi - use always vlan id 0. Example before patch: root@debian:~# bridge mdb add dev br0 port eth1 grp 239.0.0.1 permanent root@debian:~# bridge mdb dev br0 port eth1 grp 239.0.0.1 permanent root@debian:~# bridge mdb del dev br0 port eth1 grp 239.0.0.1 permanent RTNETLINK answers: Invalid argument After patch: root@debian:~# bridge mdb add dev br0 port eth1 grp 239.0.0.1 permanent root@debian:~# bridge mdb dev br0 port eth1 grp 239.0.0.1 permanent root@debian:~# bridge mdb del dev br0 port eth1 grp 239.0.0.1 permanent root@debian:~# bridge mdb Signed-off-by: Nikolay Aleksandrov Fixes: b0e9a30dd669 ("bridge: Add vlan id to multicast groups") Signed-off-by: David S. Miller diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 3bfc675..60868c2 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -374,6 +374,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, if (!p || p->br != br || p->state == BR_STATE_DISABLED) return -EINVAL; + memset(&ip, 0, sizeof(ip)); ip.proto = entry->addr.proto; if (ip.proto == htons(ETH_P_IP)) ip.u.ip4 = entry->addr.u.ip4; @@ -420,6 +421,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) if (!netif_running(br->dev) || br->multicast_disabled) return -EINVAL; + memset(&ip, 0, sizeof(ip)); ip.proto = entry->addr.proto; if (ip.proto == htons(ETH_P_IP)) { if (timer_pending(&br->ip4_other_query.timer)) -- cgit v0.10.2 From 671b53eec2edcbfac3e53d02cf3d0c6d9ecc07de Mon Sep 17 00:00:00 2001 From: Shradha Shah Date: Wed, 8 Jul 2015 10:12:45 +0100 Subject: sfc: Ensure down_write(&filter_sem) and up_write() are matched before calling efx_net_open() This patch avoids the double up_write to filter_sem if efx_net_open() fails. Resolves: 2d432f20d27c1813a2746008e16dd6ce12a14dc1 Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 7485f71..3c17f27 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -598,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, vf->vlan, &vf->vport_id); if (rc) - goto reset_nic; + goto reset_nic_up_write; restore_mac: if (!is_zero_ether_addr(vf->mac)) { rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); if (rc2) { eth_zero_addr(vf->mac); - goto reset_nic; + goto reset_nic_up_write; } } restore_evb_port: rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); if (rc2) - goto reset_nic; + goto reset_nic_up_write; else vf->vport_assigned = 1; @@ -620,14 +620,16 @@ restore_vadaptor: if (vf->efx) { rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); if (rc2) - goto reset_nic; + goto reset_nic_up_write; } restore_filters: if (vf->efx) { rc2 = vf->efx->type->filter_table_probe(vf->efx); if (rc2) - goto reset_nic; + goto reset_nic_up_write; + + up_write(&vf->efx->filter_sem); up_write(&vf->efx->filter_sem); @@ -639,9 +641,12 @@ restore_filters: } return rc; +reset_nic_up_write: + if (vf->efx) + up_write(&vf->efx->filter_sem); + reset_nic: if (vf->efx) { - up_write(&vf->efx->filter_sem); netif_err(efx, drv, efx->net_dev, "Failed to restore VF - scheduling reset.\n"); efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); -- cgit v0.10.2 From 974d7af5fcc295dcf8315255142b2fe44fd74b0c Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Tue, 7 Jul 2015 13:56:57 -0400 Subject: ipv4: add support for linkdown sysctl to netconf This kernel patch exports the value of the new ignore_routes_with_linkdown via netconf. v2: changes to notify userspace via netlink when sysctl values change and proposed for 'net' since this could be considered a bugfix Signed-off-by: Andy Gospodarek Suggested-by: Nicolas Dichtel Acked-by: Nicolas Dichtel Signed-off-by: David S. 
Miller diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 669a1f0..23cbd34 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -15,6 +15,7 @@ enum { NETCONFA_RP_FILTER, NETCONFA_MC_FORWARDING, NETCONFA_PROXY_NEIGH, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7498716..e813196 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1740,6 +1740,8 @@ static int inet_netconf_msgsize_devconf(int type) size += nla_total_size(4); if (type == -1 || type == NETCONFA_PROXY_NEIGH) size += nla_total_size(4); + if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) + size += nla_total_size(4); return size; } @@ -1780,6 +1782,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, nla_put_s32(skb, NETCONFA_PROXY_NEIGH, IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) goto nla_put_failure; + if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && + nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0) + goto nla_put_failure; nlmsg_end(skb, nlh); return 0; @@ -1819,6 +1825,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = { [NETCONFA_FORWARDING] = { .len = sizeof(int) }, [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, + [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) }, }; static int inet_netconf_get_devconf(struct sk_buff *in_skb, @@ -2048,6 +2055,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write, inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, ifindex, cnf); } + if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 && + new_value != old_value) { + ifindex = devinet_conf_ifindex(net, cnf); + inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + ifindex, cnf); + } } return ret; -- cgit v0.10.2 From 0769636cb5b95665ebadcd1a41c46f331f5a397d Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 7 Jul 2015 14:02:18 -0400 Subject: vmxnet3: prevent receive getting out of sequence on napi poll vmxnet3's current napi path is built to count every rx descriptor we recieve, and use that as a count of the napi budget. That means its possible to return from a napi poll halfway through recieving a fragmented packet accross multiple dma descriptors. If that happens, the next napi poll will start with the descriptor ring in an improper state (e.g. the first descriptor we look at may have the end-of-packet bit set), which will cause a BUG halt in the driver. Fix the issue by only counting whole received packets in the napi poll and returning that value, rather than the descriptor count. Tested by the reporter and myself, successfully Signed-off-by: Neil Horman CC: Shreyas Bhatewara CC: "David S. Miller" Acked-by: Andy Gospodarek Signed-off-by: David S. 
Miller diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index da11bb5..46f4cad 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, static const u32 rxprod_reg[2] = { VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 }; - u32 num_rxd = 0; + u32 num_pkts = 0; bool skip_page_frags = false; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; @@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxDesc *rxd; u32 idx, ring_idx; struct vmxnet3_cmd_ring *ring = NULL; - if (num_rxd >= quota) { + if (num_pkts >= quota) { /* we may stop even before we see the EOP desc of * the current pkt */ break; } - num_rxd++; BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); idx = rcd->rxdIdx; ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; @@ -1413,6 +1412,7 @@ not_lro: napi_gro_receive(&rq->napi, skb); ctx->skb = NULL; + num_pkts++; } rcd_done: @@ -1443,7 +1443,7 @@ rcd_done: &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); } - return num_rxd; + return num_pkts; } -- cgit v0.10.2 From 4eed4d8ff984abcb983ada5b3dbf56fce35f1068 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Tue, 7 Jul 2015 20:48:55 +0200 Subject: 3c59x: Fix shared IRQ handling As its first order of business, boomerang_interrupt() checks whether the device really has any pending interrupts. If it does not, it does nothing and returns, but it still returns IRQ_HANDLED. This is wrong: interrupt was not handled, IRQ handlers of other devices sharing this IRQ line need to be called. vortex_interrupt() has it right: it returns IRQ_NONE in this case via IRQ_RETVAL(0). Do the same in boomerang_interrupt(). Signed-off-by: Denys Vlasenko CC: David S. Miller CC: linux-kernel@vger.kernel.org CC: netdev@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 41095eb..2d1ce3c 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2382,6 +2382,7 @@ boomerang_interrupt(int irq, void *dev_id) void __iomem *ioaddr; int status; int work_done = max_interrupt_work; + int handled = 0; ioaddr = vp->ioaddr; @@ -2400,6 +2401,7 @@ boomerang_interrupt(int irq, void *dev_id) if ((status & IntLatch) == 0) goto handler_exit; /* No interrupt: shared IRQs can cause this */ + handled = 1; if (status == 0xffff) { /* h/w no longer present (hotplug)? */ if (vortex_debug > 1) @@ -2501,7 +2503,7 @@ boomerang_interrupt(int irq, void *dev_id) handler_exit: vp->handling_irq = 0; spin_unlock(&vp->lock); - return IRQ_HANDLED; + return IRQ_RETVAL(handled); } static int vortex_rx(struct net_device *dev) -- cgit v0.10.2 From d065c3c17dae95832badf6329512dd057c905890 Mon Sep 17 00:00:00 2001 From: Zheng Liu Date: Tue, 7 Jul 2015 13:54:12 -0700 Subject: drivers/net/usb: add device id for NVIDIA Tegra USB 3.0 Ethernet This device is sold as 'NVIDIA Tegra USB 3.0 Ethernet'. Chipset is RTL8153 and works with r8152. Signed-off-by: Zheng Liu Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 4545e78..35a2bff 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -523,6 +523,7 @@ static const struct driver_info wwan_info = { #define REALTEK_VENDOR_ID 0x0bda #define SAMSUNG_VENDOR_ID 0x04e8 #define LENOVO_VENDOR_ID 0x17ef +#define NVIDIA_VENDOR_ID 0x0955 static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -710,6 +711,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index aafa1a1..7f6419e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -494,6 +494,7 @@ enum rtl8152_flags { #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef +#define VENDOR_ID_NVIDIA 0x0955 #define MCU_TYPE_PLA 0x0100 #define MCU_TYPE_USB 0x0000 @@ -4117,6 +4118,7 @@ static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, {} }; -- cgit v0.10.2 From c936835c1ec6f871f32c9b87a7708700320075b3 Mon Sep 17 00:00:00 2001 From: Peter Dunning Date: Wed, 8 Jul 2015 10:05:10 +0100 Subject: sfc: Report TX completions to BQL after all TX events in interrupt The limit for BQL is updated each time we call netdev_tx_completed_queue. Without this patch the BQL limit was updated for every TX event we see. The issue was that this only updated the limit to handle the data we complete in two events as the first event wouldn't show that enough traffic had been processed between them. This was OK when interrupt moderation was off but not when it was on as more data had to be completed in a single interrupt. The patch changes this so that we do report the completion to BQL only when all the TX events in the interrupt have been processed. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 804b9ad..03bc03b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx) */ static int efx_process_channel(struct efx_channel *channel, int budget) { + struct efx_tx_queue *tx_queue; int spent; if (unlikely(!channel->enabled)) return 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + spent = efx_nic_process_eventq(channel, budget); if (spent && efx_channel_has_rx_queue(channel)) { struct efx_rx_queue *rx_queue = @@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_fast_push_rx_descriptors(rx_queue, true); } + /* Update BQL */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (tx_queue->bytes_compl) { + netdev_tx_completed_queue(tx_queue->core_txq, + tx_queue->pkts_compl, tx_queue->bytes_compl); + } + } + return spent; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d72f522..47d1e3a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -241,6 +241,8 @@ struct efx_tx_queue { unsigned int read_count ____cacheline_aligned_in_smp; unsigned int old_write_count; unsigned int merge_events; + unsigned int bytes_compl; + unsigned int pkts_compl; /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index aaf2987..1833a01 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); - netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; if (pkts_compl > 1) ++tx_queue->merge_events; -- cgit v0.10.2 From d5de1987853a7778bb048a9e52b3486eb9aceb17 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 8 Jul 2015 17:16:49 +0200 Subject: macvtap: Destroy minor_idr on module_exit Destroy minor_idr on module_exit, reclaiming the allocated memory. This was detected by the following semantic patch (written by Luis Rodriguez ) @ defines_module_init @ declarer name module_init, module_exit; declarer name DEFINE_IDR; identifier init; @@ module_init(init); @ defines_module_exit @ identifier exit; @@ module_exit(exit); @ declares_idr depends on defines_module_init && defines_module_exit @ identifier idr; @@ DEFINE_IDR(idr); @ on_exit_calls_destroy depends on declares_idr && defines_module_exit @ identifier declares_idr.idr, defines_module_exit.exit; @@ exit(void) { ... idr_destroy(&idr); ... } @ missing_module_idr_destroy depends on declares_idr && defines_module_exit && !on_exit_calls_destroy @ identifier declares_idr.idr, defines_module_exit.exit; @@ exit(void) { ... +idr_destroy(&idr); } Signed-off-by: Johannes Thumshirn Signed-off-by: David S. 
Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 6a64197f..b4b6191 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1294,6 +1294,7 @@ static void macvtap_exit(void) class_unregister(macvtap_class); cdev_del(&macvtap_cdev); unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); + idr_destroy(&minor_idr); } module_exit(macvtap_exit); -- cgit v0.10.2 From 19ee835cdb0b5a8eb11a68f25a51b8039d564488 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Jun 2015 14:01:19 +0100 Subject: drm/i915: Declare the swizzling unknown for L-shaped configurations The old style of memory interleaving swizzled upto the end of the first even bank of memory, and then used the remainder as unswizzled on the unpaired bank - i.e. swizzling is not constant for all memory. This causes problems when we try to migrate memory and so the kernel prevents migration at all when we detect L-shaped inconsistent swizzling. However, this issue also extends to userspace who try to manually detile into memory as the swizzling for an individual page is unknown (it depends on its physical address only known to the kernel), userspace cannot correctly swizzle objects. v2: Mark the global swizzling as unknown rather than adjust the value reported to userspace. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91105 Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 633bd1f..d61e74a 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) if (IS_GEN4(dev)) { uint32_t ddc2 = I915_READ(DCC2); - if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) + if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) { + /* Since the swizzling may vary within an + * object, we have no idea what the swizzling + * is for any page in particular. Thus we + * cannot migrate tiled pages using the GPU, + * nor can we tell userspace what the exact + * swizzling is for any object. + */ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } } if (dcc == 0xffffffff) { -- cgit v0.10.2 From 52613921b31d8573a212a4b0854b390a18d9849c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 29 Jun 2015 20:28:35 +0300 Subject: Revert "drm/i915: Allocate context objects from stolen" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stolen gets trashed during hibernation, so storing contexts there is not a very good idea. On my IVB machines this leads to a totally dead GPU on resume. A reboot is required to resurrect it. So let's not store contexts where they will get trampled. This reverts commit 149c86e74fe44dcbac5e9f8d145c5fbc5dc21261. 
Cc: Chris Wilson Signed-off-by: Ville Syrjälä Acked-by: Chris Wilson Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8867818..d65cbe6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) struct drm_i915_gem_object *obj; int ret; - obj = i915_gem_object_create_stolen(dev, size); - if (obj == NULL) - obj = i915_gem_alloc_object(dev, size); + obj = i915_gem_alloc_object(dev, size); if (obj == NULL) return ERR_PTR(-ENOMEM); -- cgit v0.10.2 From d23f47d4927fd2f61b3a754d83c7bcec215b5cfe Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 8 Jul 2015 17:26:37 +0200 Subject: USB: serial: Destroy serial_minors IDR on module exit Destroy serial_minors IDR on module exit, reclaiming the allocated memory. This was detected by the following semantic patch (written by Luis Rodriguez ) @ defines_module_init @ declarer name module_init, module_exit; declarer name DEFINE_IDR; identifier init; @@ module_init(init); @ defines_module_exit @ identifier exit; @@ module_exit(exit); @ declares_idr depends on defines_module_init && defines_module_exit @ identifier idr; @@ DEFINE_IDR(idr); @ on_exit_calls_destroy depends on declares_idr && defines_module_exit @ identifier declares_idr.idr, defines_module_exit.exit; @@ exit(void) { ... idr_destroy(&idr); ... } @ missing_module_idr_destroy depends on declares_idr && defines_module_exit && !on_exit_calls_destroy @ identifier declares_idr.idr, defines_module_exit.exit; @@ exit(void) { ... +idr_destroy(&idr); } Signed-off-by: Johannes Thumshirn Cc: stable # v3.11 Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 529066b..46f1f13 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void) tty_unregister_driver(usb_serial_tty_driver); put_tty_driver(usb_serial_tty_driver); bus_unregister(&usb_serial_bus_type); + idr_destroy(&serial_minors); } -- cgit v0.10.2 From 0ec62aaee919991d9e0e278f70776d560d95fc29 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jul 2015 10:51:46 +0200 Subject: cris: Replace do_posix_clock_monotonic_gettime() ktime_get_ts() is the proper interface today. Signed-off-by: Thomas Gleixner Cc: Jesper Nilsson diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index 4dda9bd..e989cee 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port) if (port->write_ts_idx == NBR_IN_DESCR) port->write_ts_idx = 0; idx = port->write_ts_idx++; - do_posix_clock_monotonic_gettime(&port->timestamp[idx]); + ktime_get_ts(&port->timestamp[idx]); port->in_buffer_len += port->inbufchunk; } spin_unlock_irqrestore(&port->lock, flags); -- cgit v0.10.2 From 1f6823faa8c563431a94e614d2b63ce16bb6f658 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jul 2015 10:51:46 +0200 Subject: time: Get rid of do_posix_clock_monotonic_gettime All users gone. Remove it before we get another one. 
Signed-off-by: Thomas Gleixner diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 3aa72e6..6e191e4 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts) } #endif -#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) #define ktime_get_real_ts64(ts) getnstimeofday64(ts) /* -- cgit v0.10.2 From a6335fa11e08fc386740557b2a4c1b7ff34bc499 Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Thu, 2 Jul 2015 17:16:01 +0200 Subject: MIPS: bootmem: Don't use memory holes for page bitmap Commit f9a7febd leads to a fact that mapstart and therefore a page bitmap for bootmem allocator immediately follows initrd_end. This doesn't always work well on Octeon, where there are holes in PFN ranges (refer to 5b3b1688 and 4MB-aligned PFN allocation). Depending on the inird location it could happen, that mapstart would be in an area not allocated by plat_mem_setup() in arch/mips/cavium-octeon/setup.c, but in the alignment hole between initrd and the next PFN area. Later on this memory will be unconditionally made available to buddy allocator at the end of free_all_bootmem_core() (mm/bootmem.c). All of this results in Linux using the memory not designated for Linux in Octeon's plat_mem_setup(), which in turn means corruption of the memory used by another OS/baremetal code on the same SoC. It doesn't look to me as a problem of Octeon platform code, but rather as an inability of f9a7febd to deal correctly with the fragmented memory-mappings. Proposed fix moves the check for initrd address to the same calculation-loop in bootmem_init() (arch/mips/kernel/setup.c), which also accounts for kernel code location. This should result in mapstart located starting from the first PFN area after kernel code AND initrd. Signed-off-by: Alexander Sverdlin Cc: linux-mips@linux-mips.org Cc: David Daney Cc: Zubair Lutfullah Kakakhel Cc: Huacai Chen Cc: Andreas Herrmann Cc: Joe Perches Cc: Steven J. Hill Cc: Yusuf Khan Cc: Michael Kreuzer Cc: Aaro Koskinen Patchwork: https://patchwork.linux-mips.org/patch/10594/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index be73c49..008b337 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -337,6 +337,11 @@ static void __init bootmem_init(void) min_low_pfn = start; if (end <= reserved_end) continue; +#ifdef CONFIG_BLK_DEV_INITRD + /* mapstart should be after initrd_end */ + if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) + continue; +#endif if (start >= mapstart) continue; mapstart = max(reserved_end, start); @@ -366,14 +371,6 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } -#ifdef CONFIG_BLK_DEV_INITRD - /* - * mapstart should be after initrd_end - */ - if (initrd_end) - mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); -#endif - /* * Initialize the boot-time allocator with low memory only. 
*/ -- cgit v0.10.2 From 761b4493bbb7b614387794f39e3969716e9e0215 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 24 Jun 2015 09:29:20 +0100 Subject: MIPS: kernel: traps: Fix broken indentation Fix broken indentation caused by the SMTC removal commit b633648c5ad3cfbda0b3daea50d2135d44899259 ("MIPS: MT: Remove SMTC support") Signed-off-by: Markos Chandras Fixes: b633648c5ad3c ("MIPS: MT: Remove SMTC support") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10581/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 2a7b38e..e207a43 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu) BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); - /* Boot CPU's cache setup in setup_arch(). */ - if (!is_boot_cpu) - cpu_cache_init(); - tlb_init(); + /* Boot CPU's cache setup in setup_arch(). */ + if (!is_boot_cpu) + cpu_cache_init(); + tlb_init(); TLBMISS_HANDLER_SETUP(); } -- cgit v0.10.2 From e9d92d223381f1f3be5d87322b576721d3b93612 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 24 Jun 2015 09:52:00 +0100 Subject: MIPS: Fix branch emulation for BLTC and BGEC instructions Commits f1b44067c19258b7614e3cd09dfe8d8e12ff5895 ("MIPS: Emulate the new MIPS R6 B{L,G}T{Z,}{AL,}C instructions") and commit a8ff66f52d3f17b5ae793955270675c197f73d6c ("MIPS: Emulate the new MIPS R6 B{L,G}E{Z,}{AL,}C instructions") added support for emulating various branch compact instructions. However, it missed the case for those which use the old BLEZL and BGTZL opcodes leading to random crashes when the R6 emulator is disabled. We fix this by ensuring that the 'rt' field is not zero which is always true for these branch compact instructions. 
Fixes: f1b44067c192 ("MIPS: Emulate the new MIPS R6 B{L,G}T{Z,}{AL,}C instructions") Fixes: a8ff66f52d3f ("MIPS: Emulate the new MIPS R6 B{L,G}E{Z,}{AL,}C instructions") Cc: # 4.0+ Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: Markos Chandras Patchwork: https://patchwork.linux-mips.org/patch/10582/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index c0c5e59..d8f9b35 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; case blezl_op: /* not really i_format */ - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) goto sigill_r6; case blez_op: /* @@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; case bgtzl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) goto sigill_r6; case bgtz_op: /* diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 22b9b2c..00c241a 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; case blezl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) break; case blez_op: @@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; case bgtzl_op: - if (NO_R6EMU) + if (!insn.i_format.rt && NO_R6EMU) break; case bgtz_op: /* -- cgit v0.10.2 From 143fefc8f315cd10e046e6860913c421c3385cb1 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 24 Jun 2015 09:52:01 +0100 Subject: MIPS: Fix erroneous JR emulation for MIPS R6 Commit 5f9f41c474befb4ebbc40b27f65bb7d649241581 ("MIPS: kernel: Prepare the JR instruction for emulation on MIPS R6") added support for emulating the JR instruction on MIPS R6 cores but that introduced a bug which could be triggered when hitting a JALR opcode because the code used the wrong field in the 'r_format' struct to determine the instruction opcode. This lead to crashes because an emulated JALR instruction was treated as a JR one when the R6 emulator was turned off. Fixes: 5f9f41c474be ("MIPS: kernel: Prepare the JR instruction for emulation on MIPS R6") Cc: # 4.0+ Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10583/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 00c241a..712f17a 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, /* Fall through */ case jr_op: /* For R6, JR already emulated in jalr_op */ - if (NO_R6EMU && insn.r_format.opcode == jr_op) + if (NO_R6EMU && insn.r_format.func == jr_op) break; *contpc = regs->regs[insn.r_format.rs]; return 1; -- cgit v0.10.2 From fd5ed3066bb2f47814fe53cdc56d11a678551ae1 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:28 +0100 Subject: MIPS: kernel: smp-cps: Fix 64-bit compatibility errors due to pointer casting Commit 1d8f1f5a780a ("MIPS: smp-cps: hotplug support") added hotplug support in the SMP/CPS implementation but it introduced a few build problems on 64-bit kernels due to pointer being casted to and from 'int' C types. We fix this problem by using 'unsigned long' instead which should match the size of the pointers in 32/64-bit kernels. 
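As a standalone sketch of that pattern (not the kernel code itself; the names here are made up for illustration), a small integer such as a CPU number can be carried through a void * callback argument by casting via unsigned long, which matches the pointer width on both 32-bit and 64-bit builds:

#include <stdio.h>

/* Callback receives its argument as an opaque pointer. */
static void sibling_halt_cb(void *ptr_cpu)
{
	unsigned int cpu = (unsigned long)ptr_cpu;	/* narrow back via unsigned long */

	printf("called for cpu %u\n", cpu);
}

int main(void)
{
	unsigned int cpu = 3;

	/* Widen through unsigned long first so the value round-trips cleanly. */
	sibling_halt_cb((void *)(unsigned long)cpu);
	return 0;
}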
Finally, we fix the comment since the CM base address is loaded to v1($3) instead of v0. Fixes the following build problems: arch/mips/kernel/smp-cps.c: In function 'wait_for_sibling_halt': arch/mips/kernel/smp-cps.c:366:17: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast] [...] arch/mips/kernel/smp-cps.c: In function 'cps_cpu_die': arch/mips/kernel/smp-cps.c:427:13: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] cc1: all warnings being treated as errors Fixes: 1d8f1f5a780a ("MIPS: smp-cps: hotplug support") Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10586/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 4251d39..c889377 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* * Patch the start of mips_cps_core_entry to provide: * - * v0 = CM base address + * v1 = CM base address * s0 = kseg0 CCA */ entry_code = (u32 *)&mips_cps_core_entry; @@ -369,7 +369,7 @@ void play_dead(void) static void wait_for_sibling_halt(void *ptr_cpu) { - unsigned cpu = (unsigned)ptr_cpu; + unsigned cpu = (unsigned long)ptr_cpu; unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); unsigned halted; unsigned long flags; @@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu) */ err = smp_call_function_single(cpu_death_sibling, wait_for_sibling_halt, - (void *)cpu, 1); + (void *)(unsigned long)cpu, 1); if (err) panic("Failed to call remote sibling CPU\n"); } -- cgit v0.10.2 From 81a02e34ded906357deac7003fbb0d36b6cc503f Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:29 +0100 Subject: MIPS: kernel: cps-vec: Replace 'la' macro with PTR_LA The PTR_LA macro will pick the correct "la" or "dla" macro to load an address to a register. This gets rids of the following warnings (and others) when building a 64-bit CPS kernel: arch/mips/kernel/cps-vec.S:63: Warning: la used to load 64-bit address arch/mips/kernel/cps-vec.S:159: Warning: la used to load 64-bit address arch/mips/kernel/cps-vec.S:220: Warning: la used to load 64-bit address arch/mips/kernel/cps-vec.S:240: Warning: la used to load 64-bit address [...] 
Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10587/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 55b759a..a4b2d81 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry) nop /* This is an NMI */ - la k0, nmi_handler + PTR_LA k0, nmi_handler jr k0 nop @@ -156,7 +156,7 @@ dcache_done: ehb /* Jump to kseg0 */ - la t0, 1f + PTR_LA t0, 1f jr t0 nop @@ -217,7 +217,7 @@ LEAF(excep_intex) .org 0x480 LEAF(excep_ejtag) - la k0, ejtag_debug_handler + PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) @@ -237,7 +237,7 @@ LEAF(mips_cps_core_init) /* ...and for the moment only 1 VPE */ dvpe - la t1, 1f + PTR_LA t1, 1f jr.hb t1 nop @@ -298,14 +298,14 @@ LEAF(mips_cps_core_init) LEAF(mips_cps_boot_vpes) /* Retrieve CM base address */ - la t0, mips_cm_base + PTR_LA t0, mips_cm_base lw t0, 0(t0) /* Calculate a pointer to this cores struct core_boot_config */ lw t0, GCR_CL_ID_OFS(t0) li t1, COREBOOTCFG_SIZE mul t0, t0, t1 - la t1, mips_cps_core_bootcfg + PTR_LA t1, mips_cps_core_bootcfg lw t1, 0(t1) addu t0, t0, t1 @@ -351,7 +351,7 @@ LEAF(mips_cps_boot_vpes) 1: /* Enter VPE configuration state */ dvpe - la t1, 1f + PTR_LA t1, 1f jr.hb t1 nop 1: mfc0 t1, CP0_MVPCONTROL @@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes) /* This VPE should be offline, halt the TC */ li t0, TCHALT_H mtc0 t0, CP0_TCHALT - la t0, 1f + PTR_LA t0, 1f 1: jr.hb t0 nop @@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes) .set noat lw $1, TI_CPU(gp) sll $1, $1, LONGLOG - la \dest, __per_cpu_offset + PTR_LA \dest, __per_cpu_offset addu $1, $1, \dest lw $1, 0($1) - la \dest, cps_cpu_state + PTR_LA \dest, cps_cpu_state addu \dest, \dest, $1 .set pop .endm -- cgit v0.10.2 From 977e043d5ea1270ce985e4c165724ff91dc3c3e2 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:30 +0100 Subject: MIPS: kernel: cps-vec: Replace mips32r2 ISA level with mips64r2 mips32r2 is a subset of mips64r2, so we replace mips32r2 with mips64r2 in preparation for 64-bit CPS support. Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10588/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index a4b2d81..bbbd88e 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -229,7 +229,7 @@ LEAF(mips_cps_core_init) nop .set push - .set mips32r2 + .set mips64r2 .set mt /* Only allow 1 TC per VPE to execute... */ @@ -346,7 +346,7 @@ LEAF(mips_cps_boot_vpes) nop .set push - .set mips32r2 + .set mips64r2 .set mt 1: /* Enter VPE configuration state */ -- cgit v0.10.2 From 0586ac75cd0746a4d5c43372dabcea8739ae0176 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:31 +0100 Subject: MIPS: kernel: cps-vec: Use ta0-ta3 pseudo-registers for 64-bit The cps-vec code assumes O32 ABI and uses t4-t7 in quite a few places. This breaks the build on 64-bit. As a result of which, use the pseudo-registers ta0-ta3 to make the code compatible with 64-bit. 
Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10589/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index bbbd88e..21f714a 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -250,25 +250,25 @@ LEAF(mips_cps_core_init) mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) - addiu t7, t0, 1 + addiu ta3, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f nop /* Loop through each VPE within this core */ - li t5, 1 + li ta1, 1 1: /* Operate on the appropriate TC */ - mtc0 t5, CP0_VPECONTROL + mtc0 ta1, CP0_VPECONTROL ehb /* Bind TC to VPE (1:1 TC:VPE mapping) */ - mttc0 t5, CP0_TCBIND + mttc0 ta1, CP0_TCBIND /* Set exclusive TC, non-active, master */ li t0, VPECONF0_MVP - sll t1, t5, VPECONF0_XTC_SHIFT + sll t1, ta1, VPECONF0_XTC_SHIFT or t0, t0, t1 mttc0 t0, CP0_VPECONF0 @@ -280,8 +280,8 @@ LEAF(mips_cps_core_init) mttc0 t0, CP0_TCHALT /* Next VPE */ - addiu t5, t5, 1 - slt t0, t5, t7 + addiu ta1, ta1, 1 + slt t0, ta1, ta3 bnez t0, 1b nop @@ -310,7 +310,7 @@ LEAF(mips_cps_boot_vpes) addu t0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ - has_mt t6, 1f + has_mt ta2, 1f li t9, 0 /* Find the number of VPEs present in the core */ @@ -334,13 +334,13 @@ LEAF(mips_cps_boot_vpes) 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE mul v0, t9, t1 - lw t7, COREBOOTCFG_VPECONFIG(t0) - addu v0, v0, t7 + lw ta3, COREBOOTCFG_VPECONFIG(t0) + addu v0, v0, ta3 #ifdef CONFIG_MIPS_MT /* If the core doesn't support MT then return */ - bnez t6, 1f + bnez ta2, 1f nop jr ra nop @@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes) ehb /* Loop through each VPE */ - lw t6, COREBOOTCFG_VPEMASK(t0) - move t8, t6 - li t5, 0 + lw ta2, COREBOOTCFG_VPEMASK(t0) + move t8, ta2 + li ta1, 0 /* Check whether the VPE should be running. If not, skip it */ -1: andi t0, t6, 1 +1: andi t0, ta2, 1 beqz t0, 2f nop @@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes) mfc0 t0, CP0_VPECONTROL ori t0, t0, VPECONTROL_TARGTC xori t0, t0, VPECONTROL_TARGTC - or t0, t0, t5 + or t0, t0, ta1 mtc0 t0, CP0_VPECONTROL ehb @@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes) /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE - mul t0, t0, t5 - addu t0, t0, t7 + mul t0, t0, ta1 + addu t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) @@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes) mttc0 t0, CP0_VPECONF0 /* Next VPE */ -2: srl t6, t6, 1 - addiu t5, t5, 1 - bnez t6, 1b +2: srl ta2, ta2, 1 + addiu ta1, ta1, 1 + bnez ta2, 1b nop /* Leave VPE configuration state */ -- cgit v0.10.2 From 717f14255a52ad445d6f0eca7d0f22f59d6ba1f8 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:32 +0100 Subject: MIPS: kernel: cps-vec: Replace KSEG0 with CKSEG0 In preparation for 64-bit CPS support, we replace KSEG0 with CKSEG0 so 64-bit kernels can be supported. 
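For context, a tiny standalone demo (assuming the usual MIPS segment layout; this is not part of the patch): the cached compatibility segment a 64-bit kernel uses sits at the sign-extended form of the 32-bit KSEG0 base, which is the difference the CKSEG0 constant captures.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kseg0_32 = 0x80000000u;	/* 32-bit cached segment base */
	/* Sign-extend to 64 bits (two's complement assumed). */
	uint64_t ckseg0 = (uint64_t)(int64_t)(int32_t)kseg0_32;

	/* Prints 0x80000000 and 0xffffffff80000000 */
	printf("KSEG0 (32-bit): %#x\nCKSEG0 (64-bit): %#llx\n",
	       kseg0_32, (unsigned long long)ckseg0);
	return 0;
}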
Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10590/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 21f714a..2f95568 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -107,7 +107,7 @@ not_nmi: mul t1, t1, t0 mul t1, t1, t2 - li a0, KSEG0 + li a0, CKSEG0 add a1, a0, t1 1: cache Index_Store_Tag_I, 0(a0) add a0, a0, t0 @@ -134,7 +134,7 @@ icache_done: mul t1, t1, t0 mul t1, t1, t2 - li a0, KSEG0 + li a0, CKSEG0 addu a1, a0, t1 subu a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) -- cgit v0.10.2 From b677bc03d757c7d749527cccdd2afcf34ebeeb07 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:13:33 +0100 Subject: MIPS: cps-vec: Use macros for various arithmetics and memory operations Replace lw/sw and various arithmetic instructions with macros so the code can work on 64-bit kernels as well. Cc: # 3.16+ Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10591/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 2f95568..1b6ca63 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -108,9 +108,9 @@ not_nmi: mul t1, t1, t2 li a0, CKSEG0 - add a1, a0, t1 + PTR_ADD a1, a0, t1 1: cache Index_Store_Tag_I, 0(a0) - add a0, a0, t0 + PTR_ADD a0, a0, t0 bne a0, a1, 1b nop icache_done: @@ -135,11 +135,11 @@ icache_done: mul t1, t1, t2 li a0, CKSEG0 - addu a1, a0, t1 - subu a1, a1, t0 + PTR_ADDU a1, a0, t1 + PTR_SUBU a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) bne a0, a1, 1b - add a0, a0, t0 + PTR_ADD a0, a0, t0 dcache_done: /* Set Kseg0 CCA to that in s0 */ @@ -152,7 +152,7 @@ dcache_done: /* Enter the coherent domain */ li t0, 0xff - sw t0, GCR_CL_COHERENCE_OFS(v1) + PTR_S t0, GCR_CL_COHERENCE_OFS(v1) ehb /* Jump to kseg0 */ @@ -178,9 +178,9 @@ dcache_done: nop /* Off we go! */ - lw t1, VPEBOOTCFG_PC(v0) - lw gp, VPEBOOTCFG_GP(v0) - lw sp, VPEBOOTCFG_SP(v0) + PTR_L t1, VPEBOOTCFG_PC(v0) + PTR_L gp, VPEBOOTCFG_GP(v0) + PTR_L sp, VPEBOOTCFG_SP(v0) jr t1 nop END(mips_cps_core_entry) @@ -299,15 +299,15 @@ LEAF(mips_cps_core_init) LEAF(mips_cps_boot_vpes) /* Retrieve CM base address */ PTR_LA t0, mips_cm_base - lw t0, 0(t0) + PTR_L t0, 0(t0) /* Calculate a pointer to this cores struct core_boot_config */ - lw t0, GCR_CL_ID_OFS(t0) + PTR_L t0, GCR_CL_ID_OFS(t0) li t1, COREBOOTCFG_SIZE mul t0, t0, t1 PTR_LA t1, mips_cps_core_bootcfg - lw t1, 0(t1) - addu t0, t0, t1 + PTR_L t1, 0(t1) + PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ has_mt ta2, 1f @@ -334,8 +334,8 @@ LEAF(mips_cps_boot_vpes) 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE mul v0, t9, t1 - lw ta3, COREBOOTCFG_VPECONFIG(t0) - addu v0, v0, ta3 + PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) + PTR_ADDU v0, v0, ta3 #ifdef CONFIG_MIPS_MT @@ -360,7 +360,7 @@ LEAF(mips_cps_boot_vpes) ehb /* Loop through each VPE */ - lw ta2, COREBOOTCFG_VPEMASK(t0) + PTR_L ta2, COREBOOTCFG_VPEMASK(t0) move t8, ta2 li ta1, 0 -- cgit v0.10.2 From 1c885357da2d3cf62132e611c0beaf4cdf607dd9 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 1 Jul 2015 09:31:14 +0100 Subject: Revert "MIPS: Kconfig: Disable SMP/CPS for 64-bit" This reverts commit 6ca716f2e5571d25a3899c6c5c91ff72ea6d6f5e. 
SMP/CPS is now supported on 64bit cores. Cc: # 4.1 Reviewed-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10592/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 2a14585..aab7e46 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2231,7 +2231,7 @@ config MIPS_CMP config MIPS_CPS bool "MIPS Coherent Processing System support" - depends on SYS_SUPPORTS_MIPS_CPS && !64BIT + depends on SYS_SUPPORTS_MIPS_CPS select MIPS_CM select MIPS_CPC select MIPS_CPS_PM if HOTPLUG_CPU -- cgit v0.10.2 From a0f67441b06525a1e5fd713ba0d75af4e5d6b198 Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Thu, 9 Jul 2015 14:41:53 +0530 Subject: drm/amdkfd: validate pdd where it acquired first Currently pdd is validate after dereferencing it, which is not correct, Thus validate pdd before its first use. Signed-off-by: Maninder Singh Signed-off-by: Oded Gabbay diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 8a1f999..9be0070 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) pqm_uninit(&p->pqm); pdd = kfd_get_process_device_data(dev, p); + + if (!pdd) { + mutex_unlock(&p->mutex); + return; + } + if (pdd->reset_wavefronts) { dbgdev_wave_reset_wavefronts(pdd->dev, p); pdd->reset_wavefronts = false; @@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) * We don't call amd_iommu_unbind_pasid() here * because the IOMMU called us. */ - if (pdd) - pdd->bound = false; + pdd->bound = false; mutex_unlock(&p->mutex); } -- cgit v0.10.2 From e2fc61f384c9224b55990a644bbcf68c25e20203 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Mon, 29 Jun 2015 19:15:03 +0300 Subject: ARCv2: [axs103] bump CPU frequency from 75 to 90 MHZ With up-to-date FPGA builds ARC cores are supposed to correctly operate even with 90 MHz clock (which is a target frequency for AXS103 release). Signed-off-by: Alexey Brodkin Cc: arc-linux-dev@synopsys.com diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index 15c8d62..1cd5e82 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -12,7 +12,7 @@ / { compatible = "snps,arc"; - clock-frequency = <75000000>; + clock-frequency = <90000000>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 199d428..2f0b332 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -12,7 +12,7 @@ / { compatible = "snps,arc"; - clock-frequency = <75000000>; + clock-frequency = <90000000>; #address-cells = <1>; #size-cells = <1>; -- cgit v0.10.2 From 80f420842ff42ad61f84584716d74ef635f13892 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Fri, 3 Jul 2015 11:26:22 +0530 Subject: ARC: Make ARC bitops "safer" (add anti-optimization) ARCompact/ARCv2 ISA provide that any instructions which deals with bitpos/count operand ASL, LSL, BSET, BCLR, BMSK .... will only consider lower 5 bits. i.e. auto-clamp the pos to 0-31. ARC Linux bitops exploited this fact by NOT explicitly masking out upper bits for @nr operand in general, saving a bunch of AND/BMSK instructions in generated code around bitops. 
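[Note for readers: as an illustration of what that shortcut means in C terms -- this is a standalone sketch, not code taken from the patch:]

/* Sketch only: the unmasked shortcut vs. explicit masking, assuming
 * 32-bit words as on ARC. */
#include <stdio.h>
#include <stdint.h>

static int test_bit_sketch(unsigned int nr, const uint32_t *addr)
{
	uint32_t mask;

	addr += nr >> 5;		/* pick the 32-bit word */
	/* old shortcut: mask = 1 << nr;  -- relies on the hardware
	 * clamping the shift count; undefined in C once nr >= 32 */
	mask = 1U << (nr & 0x1f);	/* what the fix below does */
	return (mask & *addr) != 0;
}

int main(void)
{
	uint32_t map[2] = { 0x0, 0x1 };	/* overall bit 32 is set */
	int i;

	for (i = 63; i >= 0; i--)
		if (test_bit_sketch(i, map))
			printf("bit %d is set\n", i);
	return 0;
}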
While this micro-optimization has worked well over years it is NOT safe as shifting a number with a value, greater than native size is "undefined" per "C" spec. So as it turns outm EZChip ran into this eventually, in their massive muti-core SMP build with 64 cpus. There was a test_bit() inside a loop from 63 to 0 and gcc was weirdly optimizing away the first iteration (so it was really adhering to standard by implementing undefined behaviour vs. removing all the iterations which were phony i.e. (1 << [63..32]) | for i = 63 to 0 | X = ( 1 << i ) | if X == 0 | continue So fix the code to do the explicit masking at the expense of generating additional instructions. Fortunately, this can be mitigated to a large extent as gcc has SHIFT_COUNT_TRUNCATED which allows combiner to fold masking into shift operation itself. It is currently not enabled in ARC gcc backend, but could be done after a bit of testing. Fixes STAR 9000866918 ("unsafe "undefined behavior" code in kernel") Reported-by: Noam Camus Cc: Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 99fe118..57c1f33 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ * done for const @nr, but no code is generated due to gcc \ * const prop. \ */ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ + nr &= 0x1f; \ \ __asm__ __volatile__( \ "1: llock %0, [%1] \n" \ @@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ + nr &= 0x1f; \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ unsigned long temp, flags; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ /* \ * spin lock/unlock provide the needed smp_mb() before/after \ */ \ bitops_lock(flags); \ \ temp = *m; \ - *m = temp c_op (1UL << nr); \ + *m = temp c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ } @@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * unsigned long old, flags; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ bitops_lock(flags); \ \ old = *m; \ - *m = old c_op (1 << nr); \ + *m = old c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ \ - return (old & (1 << nr)) != 0; \ + return (old & (1UL << (nr & 0x1f))) != 0; \ } #endif /* CONFIG_ARC_HAS_LLSC */ @@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \ unsigned long temp; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ temp = *m; \ - *m = temp c_op (1UL << nr); \ + *m = temp c_op (1UL << (nr & 0x1f)); \ } #define __TEST_N_BIT_OP(op, c_op, asm_op) \ @@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long unsigned long old; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ old = *m; \ - *m = old c_op (1 << nr); \ + *m = old c_op (1UL << (nr & 0x1f)); \ \ - return (old & (1 << nr)) != 0; \ + return (old & (1UL << (nr & 0x1f))) != 0; \ } #define BIT_OPS(op, c_op, asm_op) \ @@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr) addr += nr >> 5; - if (__builtin_constant_p(nr)) - nr &= 0x1f; - - mask = 1 << nr; + mask = 1UL << (nr & 0x1f); return ((mask & *addr) 
!= 0); } -- cgit v0.10.2 From 70d93d89416562c32adc9444a15677bdf25a72ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=ABl=20Porquet?= Date: Tue, 7 Jul 2015 17:14:56 -0400 Subject: arc:irqchip: prepare for drivers/irqchip/irqchip.h removal The IRQCHIP_DECLARE macro migrated to 'include/linux/irqchip.h'. See commit 91e20b5040c67c51aad88cf87db4305c5bd7f79d ("irqchip: Move IRQCHIP_DECLARE macro to include/linux/irqchip.h"). This patch removes the inclusions of private header 'drivers/irqchip/irqchip.h' and if necessary replaces them with inclusions of 'include/linux/irqchip.h'. Signed-off-by: Joel Porquet Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 6208c63..26c1568 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -12,7 +12,6 @@ #include #include #include -#include "../../drivers/irqchip/irqchip.h" #include /* diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index fcdddb6..039fac3 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -12,7 +12,6 @@ #include #include #include -#include "../../drivers/irqchip/irqchip.h" #include /* diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 6fb0a2f..2fb8658 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -175,7 +175,6 @@ void mcip_init_early_smp(void) #include #include #include -#include "../../drivers/irqchip/irqchip.h" /* * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) -- cgit v0.10.2 From 9138d4138d8e1f0acf734d2fb3455108bf43380f Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 25 Jun 2015 13:46:57 +0530 Subject: ARC: Add llock/scond to futex backend Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h index 05b5aaf..70cfe16 100644 --- a/arch/arc/include/asm/futex.h +++ b/arch/arc/include/asm/futex.h @@ -16,12 +16,40 @@ #include #include +#ifdef CONFIG_ARC_HAS_LLSC + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ + \ + __asm__ __volatile__( \ + "1: llock %1, [%2] \n" \ + insn "\n" \ + "2: scond %0, [%2] \n" \ + " bnz 1b \n" \ + " mov %0, 0 \n" \ + "3: \n" \ + " .section .fixup,\"ax\" \n" \ + " .align 4 \n" \ + "4: mov %0, %4 \n" \ + " b 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " .align 4 \n" \ + " .word 1b, 4b \n" \ + " .word 2b, 4b \n" \ + " .previous \n" \ + \ + : "=&r" (ret), "=&r" (oldval) \ + : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ + : "cc", "memory") + +#else /* !CONFIG_ARC_HAS_LLSC */ + #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ \ __asm__ __volatile__( \ - "1: ld %1, [%2] \n" \ + "1: ld %1, [%2] \n" \ insn "\n" \ - "2: st %0, [%2] \n" \ + "2: st %0, [%2] \n" \ " mov %0, 0 \n" \ "3: \n" \ " .section .fixup,\"ax\" \n" \ @@ -39,6 +67,8 @@ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ : "cc", "memory") +#endif + static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; @@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, pagefault_disable(); - /* TBD : can use llock/scond */ __asm__ __volatile__( - "1: ld %0, [%3] \n" - " brne %0, %1, 3f \n" - "2: st %2, [%3] \n" +#ifdef CONFIG_ARC_HAS_LLSC + "1: llock %0, [%3] \n" + " brne %0, %1, 3f \n" + "2: scond %2, [%3] \n" + " bnz 1b \n" +#else + "1: ld %0, [%3] \n" + " brne %0, %1, 3f \n" + "2: st %2, [%3] \n" +#endif "3: \n" " .section .fixup,\"ax\" \n" "4: mov %0, %4 \n" -- cgit v0.10.2 From 
b631788ab42ea4ba97dc30b3f47bad0edb0ddb4b Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 6 Jul 2015 15:25:21 +0530 Subject: ARC: slightly refactor macros for boot logging Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index a3d1862..08d98ee 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -150,9 +150,10 @@ static const struct cpuinfo_data arc_cpu_tbl[] = { { {0x00, NULL } } }; -#define IS_AVAIL1(v, str) ((v) ? str : "") -#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") -#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) +#define IS_AVAIL1(v, s) ((v) ? s : "") +#define IS_USED_RUN(v) ((v) ? "" : "(not used) ") +#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) +#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) { @@ -226,7 +227,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); } n += scnprintf(buf + n, len - n, "%s", - IS_USED(CONFIG_ARC_HAS_HW_MPY)); + IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY)); } n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", -- cgit v0.10.2 From 08ae217b8d44986062fe3648c5bb83816d5bc00f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 9 Jul 2015 12:14:43 -0300 Subject: perf thread_map: Fix the sizeof() calculation for map entries When we started adding extra stuff per array entry, growing the size of those entries to more than sizeof(pid_t), we had to convert those sizeof operations to the more robust sizeof(map->map[0]) idiom, that is future proof, i.e. if/when we add more stuff to those entries, that expression will produce the new per-entry size. And besides that, we need to zero out those extra fields, that sometimes may not get filled, like when we couldn't care less about the comms, since we don't need those, but since we will try freeing it at thread_map__delete(), we better fix it. That is why a thread_map__realloc() was provided. But that method wasn't used in thread_map__new_by_uid(), fix it. Reported-by: Ingo Molnar Fixes: 792402fd5c0a ("perf thrad_map: Add comm string into array") Fixes: 9d7e8c3a96e5 ("perf tools: Add thread_map__(alloc|realloc) helpers") Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-6a0swlm6m8lnu3wpjv284hkb@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index da7646d..292ae2c 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -136,8 +136,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) if (grow) { struct thread_map *tmp; - tmp = realloc(threads, (sizeof(*threads) + - max_threads * sizeof(pid_t))); + tmp = thread_map__realloc(threads, max_threads); if (tmp == NULL) goto out_free_namelist; -- cgit v0.10.2 From cd404af0c930104462aa91344f07d002cf8248ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 7 Jul 2015 16:27:28 +0900 Subject: drm/radeon: Clean up reference counting and pinning of the cursor BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Take a GEM reference for and pin the new cursor BO, unpin and drop the GEM reference for the old cursor BO in radeon_crtc_cursor_set2, and use radeon_crtc->cursor_addr in radeon_set_cursor. 
This fixes radeon_cursor_reset accidentally incrementing the cursor BO pin count, and cleans up the code a little. Cc: stable@vger.kernel.org Reviewed-by: Grigori Goronzy Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 45e5406..fa66174 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -205,8 +205,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) | (x << 16) | y)); /* offset is from DISP(2)_BASE_ADDRESS */ - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + - (yorigin * 256))); + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, + radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr + + yorigin * 256); } radeon_crtc->cursor_x = x; @@ -227,51 +228,32 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, return ret; } -static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj) +static void radeon_set_cursor(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; - struct radeon_bo *robj = gem_to_radeon_bo(obj); - uint64_t gpu_addr; - int ret; - - ret = radeon_bo_reserve(robj, false); - if (unlikely(ret != 0)) - goto fail; - /* Only 27 bit offset for legacy cursor */ - ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, - ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, - &gpu_addr); - radeon_bo_unreserve(robj); - if (ret) - goto fail; if (ASIC_IS_DCE4(rdev)) { WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, - upper_32_bits(gpu_addr)); + upper_32_bits(radeon_crtc->cursor_addr)); WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - gpu_addr & 0xffffffff); + lower_32_bits(radeon_crtc->cursor_addr)); } else if (ASIC_IS_AVIVO(rdev)) { if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) - WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); + WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, + upper_32_bits(radeon_crtc->cursor_addr)); else - WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); + WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, + upper_32_bits(radeon_crtc->cursor_addr)); } WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - gpu_addr & 0xffffffff); + lower_32_bits(radeon_crtc->cursor_addr)); } else { - radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, + radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr); } - - return 0; - -fail: - drm_gem_object_unreference_unlocked(obj); - - return ret; } int radeon_crtc_cursor_set2(struct drm_crtc *crtc, @@ -283,7 +265,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_device *rdev = crtc->dev->dev_private; struct drm_gem_object *obj; + struct radeon_bo *robj; int ret; if (!handle) { @@ -305,6 +289,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, return -ENOENT; } + robj = gem_to_radeon_bo(obj); + ret = radeon_bo_reserve(robj, false); + if (ret != 0) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + /* Only 27 bit offset for legacy cursor */ + ret = 
radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, + ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, + &radeon_crtc->cursor_addr); + radeon_bo_unreserve(robj); + if (ret) { + DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); + drm_gem_object_unreference_unlocked(obj); + return ret; + } + radeon_crtc->cursor_width = width; radeon_crtc->cursor_height = height; @@ -323,13 +324,8 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, radeon_crtc->cursor_hot_y = hot_y; } - ret = radeon_set_cursor(crtc, obj); - - if (ret) - DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n", - ret); - else - radeon_show_cursor(crtc); + radeon_set_cursor(crtc); + radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); @@ -341,8 +337,7 @@ unpin: radeon_bo_unpin(robj); radeon_bo_unreserve(robj); } - if (radeon_crtc->cursor_bo != obj) - drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); + drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; @@ -360,7 +355,6 @@ unpin: void radeon_cursor_reset(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int ret; if (radeon_crtc->cursor_bo) { radeon_lock_cursor(crtc, true); @@ -368,12 +362,8 @@ void radeon_cursor_reset(struct drm_crtc *crtc) radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x, radeon_crtc->cursor_y); - ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo); - if (ret) - DRM_ERROR("radeon_set_cursor returned %d, not showing " - "cursor\n", ret); - else - radeon_show_cursor(crtc); + radeon_set_cursor(crtc); + radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 6de5459..07909d8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -343,7 +343,6 @@ struct radeon_crtc { int max_cursor_width; int max_cursor_height; uint32_t legacy_display_base_addr; - uint32_t legacy_cursor_offset; enum radeon_rmx_type rmx_type; u8 h_border; u8 v_border; -- cgit v0.10.2 From f3cbb17bcf676a2fc6aedebe9fbebd59e550c51a Mon Sep 17 00:00:00 2001 From: Grigori Goronzy Date: Tue, 7 Jul 2015 16:27:29 +0900 Subject: drm/radeon: unpin cursor BOs on suspend and pin them again on resume (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Everything is evicted from VRAM before suspend, so we need to make sure all BOs are unpinned and re-pinned after resume. Fixes broken mouse cursor after resume introduced by commit b9729b17. 
[Michel Dänzer: Add pinning BOs on resume] v2: [Alex Deucher: merge cursor unpin into fb unpin loop] Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=100541 Cc: stable@vger.kernel.org Reviewed-by: Christian König (v1) Signed-off-by: Grigori Goronzy Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 1a532a9..d8319da 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1578,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } - /* unpin the front buffers */ + /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); struct radeon_bo *robj; + if (radeon_crtc->cursor_bo) { + struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); + r = radeon_bo_reserve(robj, false); + if (r == 0) { + radeon_bo_unpin(robj); + radeon_bo_unreserve(robj); + } + } + if (rfb == NULL || rfb->obj == NULL) { continue; } @@ -1645,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; + struct drm_crtc *crtc; int r; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) @@ -1684,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) radeon_restore_bios_scratch_regs(rdev); + /* pin cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + if (radeon_crtc->cursor_bo) { + struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); + r = radeon_bo_reserve(robj, false); + if (r == 0) { + /* Only 27 bit offset for legacy cursor */ + r = radeon_bo_pin_restricted(robj, + RADEON_GEM_DOMAIN_VRAM, + ASIC_IS_AVIVO(rdev) ? 
+ 0 : 1 << 27, + &radeon_crtc->cursor_addr); + if (r != 0) + DRM_ERROR("Failed to pin cursor BO (%d)\n", r); + radeon_bo_unreserve(robj); + } + } + } + /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); -- cgit v0.10.2 From 8991668ab4e26f985a8485719bce5d6d0623a644 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 7 Jul 2015 16:27:30 +0900 Subject: drm/radeon: Fold radeon_set_cursor() into radeon_show_cursor() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Grigori Goronzy Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index fa66174..afaf346 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc) struct radeon_device *rdev = crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(radeon_crtc->cursor_addr)); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + lower_32_bits(radeon_crtc->cursor_addr)); WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { + if (rdev->family >= CHIP_RV770) { + if (radeon_crtc->crtc_id) + WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, + upper_32_bits(radeon_crtc->cursor_addr)); + else + WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, + upper_32_bits(radeon_crtc->cursor_addr)); + } + + WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + lower_32_bits(radeon_crtc->cursor_addr)); WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); } else { + /* offset is from DISP(2)_BASE_ADDRESS */ + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, + radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr); + switch (radeon_crtc->crtc_id) { case 0: WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); @@ -228,34 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, return ret; } -static void radeon_set_cursor(struct drm_crtc *crtc) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct radeon_device *rdev = crtc->dev->dev_private; - - if (ASIC_IS_DCE4(rdev)) { - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, - upper_32_bits(radeon_crtc->cursor_addr)); - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - lower_32_bits(radeon_crtc->cursor_addr)); - } else if (ASIC_IS_AVIVO(rdev)) { - if (rdev->family >= CHIP_RV770) { - if (radeon_crtc->crtc_id) - WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, - upper_32_bits(radeon_crtc->cursor_addr)); - else - WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, - upper_32_bits(radeon_crtc->cursor_addr)); - } - WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - lower_32_bits(radeon_crtc->cursor_addr)); - } else { - /* offset is from DISP(2)_BASE_ADDRESS */ - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, - radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr); - } -} - int radeon_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, @@ -324,7 +315,6 
@@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, radeon_crtc->cursor_hot_y = hot_y; } - radeon_set_cursor(crtc); radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); @@ -362,7 +352,6 @@ void radeon_cursor_reset(struct drm_crtc *crtc) radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x, radeon_crtc->cursor_y); - radeon_set_cursor(crtc); radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); -- cgit v0.10.2 From d57c0edfe00d3274b50f91ce3076ed0e82d28782 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Jul 2015 14:08:12 -0400 Subject: Revert "Revert "drm/radeon: dont switch vt on suspend"" This reverts commit ac9134906b3f5c2b45dc80dab0fee792bd516d52. We've fixed the underlying problem with cursors, so re-enable this. diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 634793e..aeb6767 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; + info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { -- cgit v0.10.2 From eb99070b4ad105f8a516d99e95226180924eddc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 8 Jul 2015 09:56:13 +0200 Subject: drm/radeon: allways add the VM clear duplicate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to allways add the VM clear duplicate of the BO_VA, no matter what the old status was. Signed-off-by: Christian König Test-by: hadack@gmx.de Tested-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index ec10533..0310965 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -493,29 +493,27 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } if (bo_va->it.start || bo_va->it.last) { - spin_lock(&vm->status_lock); - if (list_empty(&bo_va->vm_status)) { - /* add a clone of the bo_va to clear the old address */ - struct radeon_bo_va *tmp; - spin_unlock(&vm->status_lock); - tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); - if (!tmp) { - mutex_unlock(&vm->mutex); - r = -ENOMEM; - goto error_unreserve; - } - tmp->it.start = bo_va->it.start; - tmp->it.last = bo_va->it.last; - tmp->vm = vm; - tmp->bo = radeon_bo_ref(bo_va->bo); - spin_lock(&vm->status_lock); - list_add(&tmp->vm_status, &vm->freed); + /* add a clone of the bo_va to clear the old address */ + struct radeon_bo_va *tmp; + tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); + if (!tmp) { + mutex_unlock(&vm->mutex); + r = -ENOMEM; + goto error_unreserve; } - spin_unlock(&vm->status_lock); + tmp->it.start = bo_va->it.start; + tmp->it.last = bo_va->it.last; + tmp->vm = vm; + tmp->bo = radeon_bo_ref(bo_va->bo); interval_tree_remove(&bo_va->it, &vm->va); bo_va->it.start = 0; bo_va->it.last = 0; + + spin_lock(&vm->status_lock); + list_del_init(&bo_va->vm_status); + list_add(&tmp->vm_status, &vm->freed); + spin_unlock(&vm->status_lock); } if (soffset || eoffset) { -- cgit v0.10.2 From dbedff05d163f8d1b4a97b91c57b939830fc235f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 8 Jul 2015 09:56:14 +0200 Subject: drm/radeon: check if BO_VA is set before adding it to the invalidation list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we try to clear BO_VAs without an address. 
Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=91141 Signed-off-by: Christian König Test-by: hadack@gmx.de Tested-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 0310965..48d97c0 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -507,22 +507,21 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp->bo = radeon_bo_ref(bo_va->bo); interval_tree_remove(&bo_va->it, &vm->va); + spin_lock(&vm->status_lock); bo_va->it.start = 0; bo_va->it.last = 0; - - spin_lock(&vm->status_lock); list_del_init(&bo_va->vm_status); list_add(&tmp->vm_status, &vm->freed); spin_unlock(&vm->status_lock); } if (soffset || eoffset) { + spin_lock(&vm->status_lock); bo_va->it.start = soffset; bo_va->it.last = eoffset - 1; - interval_tree_insert(&bo_va->it, &vm->va); - spin_lock(&vm->status_lock); list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); + interval_tree_insert(&bo_va->it, &vm->va); } bo_va->flags = flags; @@ -1156,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, list_for_each_entry(bo_va, &bo->va, bo_list) { spin_lock(&bo_va->vm->status_lock); - if (list_empty(&bo_va->vm_status)) + if (list_empty(&bo_va->vm_status) && + (bo_va->it.start || bo_va->it.last)) list_add(&bo_va->vm_status, &bo_va->vm->invalidated); spin_unlock(&bo_va->vm->status_lock); } -- cgit v0.10.2 From 0f11770417d849558c727ed20e1cd07444e15a00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 8 Jul 2015 16:58:48 +0200 Subject: drm/amdgpu: fix timeout calculation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian König Reviewed-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 975edb1..ae43b58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns) if (((int64_t)timeout_ns) < 0) return MAX_SCHEDULE_TIMEOUT; - timeout = ktime_sub_ns(ktime_get(), timeout_ns); + timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get()); if (ktime_to_ns(timeout) < 0) return 0; -- cgit v0.10.2 From 355c822847fa70fa1df6a7451b7ecf76116efcd2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Jul 2015 12:58:20 -0400 Subject: drm/radeon: disable vce init on cayman (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cayman does not have vce. There were a few places in the shared cayman/TV code where we were trying to do vce stuff. 
v2: remove -ENOENT check Reviewed-by: Christian König Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 8e5aeeb..158872e 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } - ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; - if (ring->ring_size) - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); + if (rdev->family == CHIP_ARUBA) { + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); - ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; - if (ring->ring_size) - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); - if (!r) - r = vce_v1_0_init(rdev); - else if (r != -ENOENT) - DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); + if (!r) + r = vce_v1_0_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); + } r = radeon_ib_pool_init(rdev); if (r) { @@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev) radeon_irq_kms_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); - radeon_vce_fini(rdev); + if (rdev->family == CHIP_ARUBA) + radeon_vce_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); -- cgit v0.10.2 From 757856d2b9568a701df9ea6a4be68effbb9d6f44 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 25 Jun 2015 17:47:45 +0300 Subject: libceph: enable ceph in a non-default network namespace Grab a reference on a network namespace of the 'rbd map' (in case of rbd) or 'mount' (in case of ceph) process and use that to open sockets instead of always using init_net and bailing if network namespace is anything but init_net. Be careful to not share struct ceph_client instances between different namespaces and don't add any code in the !CONFIG_NET_NS case. This is based on a patch from Hong Zhiguo . Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index e154994..3775327 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,7 @@ struct ceph_messenger { struct ceph_entity_addr my_enc_addr; atomic_t stopping; + possible_net_t net; bool nocrc; bool tcp_nodelay; @@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr, u64 required_features, bool nocrc, bool tcp_nodelay); +extern void ceph_messenger_fini(struct ceph_messenger *msgr); extern void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index cb7db32..f30329f 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -16,8 +17,6 @@ #include #include #include -#include -#include #include @@ -131,6 +130,13 @@ int ceph_compare_options(struct ceph_options *new_opt, int i; int ret; + /* + * Don't bother comparing options if network namespaces don't + * match. 
+ */ + if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net))) + return -1; + ret = memcmp(opt1, opt2, ofs); if (ret) return ret; @@ -335,9 +341,6 @@ ceph_parse_options(char *options, const char *dev_name, int err = -ENOMEM; substring_t argstr[MAX_OPT_ARGS]; - if (current->nsproxy->net_ns != &init_net) - return ERR_PTR(-EINVAL); - opt = kzalloc(sizeof(*opt), GFP_KERNEL); if (!opt) return ERR_PTR(-ENOMEM); @@ -608,6 +611,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, fail_monc: ceph_monc_stop(&client->monc); fail: + ceph_messenger_fini(&client->msgr); kfree(client); return ERR_PTR(err); } @@ -621,8 +625,8 @@ void ceph_destroy_client(struct ceph_client *client) /* unmount */ ceph_osdc_stop(&client->osdc); - ceph_monc_stop(&client->monc); + ceph_messenger_fini(&client->msgr); ceph_debugfs_client_cleanup(client); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 1679f47..5c1f98e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -479,7 +480,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) int ret; BUG_ON(con->sock); - ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family, + ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, SOCK_STREAM, IPPROTO_TCP, &sock); if (ret) return ret; @@ -2944,11 +2945,18 @@ void ceph_messenger_init(struct ceph_messenger *msgr, msgr->tcp_nodelay = tcp_nodelay; atomic_set(&msgr->stopping, 0); + write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); dout("%s %p\n", __func__, msgr); } EXPORT_SYMBOL(ceph_messenger_init); +void ceph_messenger_fini(struct ceph_messenger *msgr) +{ + put_net(read_pnet(&msgr->net)); +} +EXPORT_SYMBOL(ceph_messenger_fini); + static void clear_standby(struct ceph_connection *con) { /* come back from STANDBY? */ -- cgit v0.10.2 From c44bd69c0c8cfadf0239437635b2933efb1f6c4c Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 9 Jul 2015 13:57:52 +0300 Subject: libceph: treat sockaddr_storage with uninitialized family as blank addr_is_blank() should return true if family is neither AF_INET nor AF_INET6. This is what its counterpart entity_addr_t::is_blank_ip() is doing and it is the right thing to do: in process_banner() we check if our address is blank and if it is "learn" it from our peer. As it is, we never learn our address and always send out a blank one. This goes way back to ceph.git commit dd732cbfc1c9 ("use sockaddr_storage; and some ipv6 support groundwork") from 2009. While at at, do not open-code ipv6_addr_any() and use INADDR_ANY constant instead of 0. 
Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 5c1f98e..e3be1d2 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1732,17 +1732,17 @@ static int verify_hello(struct ceph_connection *con) static bool addr_is_blank(struct sockaddr_storage *ss) { + struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr; + struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr; + switch (ss->ss_family) { case AF_INET: - return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; + return addr->s_addr == htonl(INADDR_ANY); case AF_INET6: - return - ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && - ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && - ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && - ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; + return ipv6_addr_any(addr6); + default: + return true; } - return false; } static int addr_port(struct sockaddr_storage *ss) -- cgit v0.10.2 From 398ecff5a562b7b69f77582f98a4b04d0de1f066 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 9 Jul 2015 11:46:14 -0400 Subject: MAINTAINERS: update ceph entries - The Ceph common code is used by both fs/ceph and drivers/block/rbd. Add a separate maintainers entry. - Add Ilya as libceph maintainer and cephfs submaintainer. - Attribute Documentation/ABI/testing/sysfs-bus-rbd to rbd. - ceph-devel@vger.kernel.org should be L, not M in rbd entry. Signed-off-by: Sage Weil Signed-off-by: Ilya Dryomov diff --git a/MAINTAINERS b/MAINTAINERS index 8133cef..58cbc21 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2562,19 +2562,29 @@ F: arch/powerpc/include/uapi/asm/spu*.h F: arch/powerpc/oprofile/*cell* F: arch/powerpc/platforms/cell/ -CEPH DISTRIBUTED FILE SYSTEM CLIENT +CEPH COMMON CODE (LIBCEPH) +M: Ilya Dryomov M: "Yan, Zheng" M: Sage Weil L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git S: Supported -F: Documentation/filesystems/ceph.txt -F: fs/ceph/ F: net/ceph/ F: include/linux/ceph/ F: include/linux/crush/ +CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH) +M: "Yan, Zheng" +M: Sage Weil +M: Ilya Dryomov +L: ceph-devel@vger.kernel.org +W: http://ceph.com/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git +S: Supported +F: Documentation/filesystems/ceph.txt +F: fs/ceph/ + CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: L: linux-usb@vger.kernel.org S: Orphan @@ -8366,10 +8376,11 @@ RADOS BLOCK DEVICE (RBD) M: Ilya Dryomov M: Sage Weil M: Alex Elder -M: ceph-devel@vger.kernel.org +L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git S: Supported +F: Documentation/ABI/testing/sysfs-bus-rbd F: drivers/block/rbd.c F: drivers/block/rbd_types.h -- cgit v0.10.2 From 6e67b7ae2157bdeb6b56381e530fc74bb68b6149 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 9 Jul 2015 11:47:37 -0400 Subject: MAINTAINERS: add secondary tree for ceph modules The Ceph kernel code is primarily developed in the github tree, and only pushed to the korg tree before going to Linus. If Sage is unavailable and another maintainer needs to push something upstream, pull requests may originate from the github tree instead of Sage's korg tree. 
Signed-off-by: Sage Weil Signed-off-by: Ilya Dryomov diff --git a/MAINTAINERS b/MAINTAINERS index 58cbc21..0d70760 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2569,6 +2569,7 @@ M: Sage Weil L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git +T: git git://github.com/ceph/ceph-client.git S: Supported F: net/ceph/ F: include/linux/ceph/ @@ -2581,6 +2582,7 @@ M: Ilya Dryomov L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git +T: git git://github.com/ceph/ceph-client.git S: Supported F: Documentation/filesystems/ceph.txt F: fs/ceph/ @@ -8379,6 +8381,7 @@ M: Alex Elder L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git +T: git git://github.com/ceph/ceph-client.git S: Supported F: Documentation/ABI/testing/sysfs-bus-rbd F: drivers/block/rbd.c -- cgit v0.10.2 From 6b7339f4c31ad69c8e9c0b2859276e22cf72176d Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 6 Jul 2015 23:18:37 +0300 Subject: mm: avoid setting up anonymous pages into file mapping Reading page fault handler code I've noticed that under right circumstances kernel would map anonymous pages into file mappings: if the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated on ->mmap(), kernel would handle page fault to not populated pte with do_anonymous_page(). Let's change page fault handler to use do_anonymous_page() only on anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not shared. For file mappings without vm_ops->fault() or shred VMA without vm_ops, page fault on pte_none() entry would lead to SIGBUS. Signed-off-by: Kirill A. Shutemov Acked-by: Oleg Nesterov Cc: Andrew Morton Cc: Willy Tarreau Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds diff --git a/mm/memory.c b/mm/memory.c index a84fbb7..388dcf9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, pte_unmap(page_table); + /* File mapping without ->vm_ops ? 
*/ + if (vma->vm_flags & VM_SHARED) + return VM_FAULT_SIGBUS; + /* Check if we need to add a guard page to the stack */ if (check_stack_guard_page(vma, address) < 0) return VM_FAULT_SIGSEGV; @@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; pte_unmap(page_table); + /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ + if (!vma->vm_ops->fault) + return VM_FAULT_SIGBUS; if (!(flags & FAULT_FLAG_WRITE)) return do_read_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); @@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm, barrier(); if (!pte_present(entry)) { if (pte_none(entry)) { - if (vma->vm_ops) { - if (likely(vma->vm_ops->fault)) - return do_fault(mm, vma, address, pte, - pmd, flags, entry); - } - return do_anonymous_page(mm, vma, address, - pte, pmd, flags); + if (vma->vm_ops) + return do_fault(mm, vma, address, pte, pmd, + flags, entry); + + return do_anonymous_page(mm, vma, address, pte, pmd, + flags); } return do_swap_page(mm, vma, address, pte, pmd, flags, entry); -- cgit v0.10.2 From 6f957724b94cb19f5c1c97efd01dd4df8ced323c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 9 Jul 2015 11:20:01 -0700 Subject: Fix firmware loader uevent buffer NULL pointer dereference The firmware class uevent function accessed the "fw_priv->buf" buffer without the proper locking and testing for NULL. This is an old bug (looks like it goes back to 2012 and commit 1244691c73b2: "firmware loader: introduce firmware_buf"), but for some reason it's triggering only now in 4.2-rc1. Shuah Khan is trying to bisect what it is that causes this to trigger more easily, but in the meantime let's just fix the bug since others are hitting it too (at least Ingo reports having seen it as well). Reported-and-tested-by: Shuah Khan Acked-by: Ming Lei Cc: Greg Kroah-Hartman Signed-off-by: Linus Torvalds diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 9c42883..894bda1 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev) kfree(fw_priv); } -static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env) { - struct firmware_priv *fw_priv = to_firmware_priv(dev); - if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id)) return -ENOMEM; if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) @@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct firmware_priv *fw_priv = to_firmware_priv(dev); + int err = 0; + + mutex_lock(&fw_lock); + if (fw_priv->buf) + err = do_firmware_uevent(fw_priv, env); + mutex_unlock(&fw_lock); + return err; +} + static struct class firmware_class = { .name = "firmware", .class_attrs = firmware_class_attrs, -- cgit v0.10.2 From 3324603524925c7727207027d1c15e597412d15e Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 9 Jul 2015 14:20:36 -0400 Subject: selinux: don't waste ebitmap space when importing NetLabel categories At present we don't create efficient ebitmaps when importing NetLabel category bitmaps. 
This can present a problem when comparing ebitmaps since ebitmap_cmp() is very strict about these things and considers these wasteful ebitmaps not equal when compared to their more efficient counterparts, even if their values are the same. This isn't likely to cause problems on 64-bit systems due to a bit of luck on how NetLabel/CIPSO works and the default ebitmap size, but it can be a problem on 32-bit systems. This patch fixes this problem by being a bit more intelligent when importing NetLabel category bitmaps by skipping over empty sections which should result in a nice, efficient ebitmap. Cc: stable@vger.kernel.org # 3.17 Signed-off-by: Paul Moore diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index afe6a26..57644b1 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap, if (offset == (u32)-1) return 0; + /* don't waste ebitmap space if the netlabel bitmap is empty */ + if (bitmap == 0) { + offset += EBITMAP_UNIT_SIZE; + continue; + } + if (e_iter == NULL || offset >= e_iter->startbit + EBITMAP_SIZE) { e_prev = e_iter; -- cgit v0.10.2 From 02941007f59ce015233d4c0f7047776960bf0c17 Mon Sep 17 00:00:00 2001 From: "qipeng.zha" Date: Thu, 9 Jul 2015 00:14:15 +0800 Subject: intel_pmc_ipc: Update kerneldoc formatting Update kerneldoc formatting per Documentation/kernel-dec-nano-HOWTO.txt. Signed-off-by: qipeng.zha Signed-off-by: Darren Hart diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h index 200ec2e..cd0310e 100644 --- a/arch/x86/include/asm/intel_pmc_ipc.h +++ b/arch/x86/include/asm/intel_pmc_ipc.h @@ -25,36 +25,9 @@ #if IS_ENABLED(CONFIG_INTEL_PMC_IPC) -/* - * intel_pmc_ipc_simple_command - * @cmd: command - * @sub: sub type - */ int intel_pmc_ipc_simple_command(int cmd, int sub); - -/* - * intel_pmc_ipc_raw_cmd - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - * @sptr: data writing to SPTR register - * @dptr: data writing to DPTR register - */ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr); - -/* - * intel_pmc_ipc_command - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - */ int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen); diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 69d43b8..105cfff 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void) return ret; } -/* - * intel_pmc_ipc_simple_command - * @cmd: command - * @sub: sub type +/** + * intel_pmc_ipc_simple_command() - Simple IPC command + * @cmd: IPC command code. + * @sub: IPC command sub type. + * + * Send a simple IPC command to PMC when don't need to specify + * input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. 
*/ int intel_pmc_ipc_simple_command(int cmd, int sub) { @@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) } EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); -/* - * intel_pmc_ipc_raw_cmd - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - * @sptr: data writing to SPTR register - * @dptr: data writing to DPTR register +/** + * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * @sptr: data writing to SPTR register. + * @dptr: data writing to DPTR register. + * + * Send an IPC command to PMC with input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr) @@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, } EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); -/* - * intel_pmc_ipc_command - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords +/** + * intel_pmc_ipc_command() - IPC command with input/output data + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * + * Send an IPC command to PMC with input/output data. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen) -- cgit v0.10.2 From 9abea2d64ce93b6909de7f83a7f681f572369708 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 9 Jul 2015 18:05:15 +0200 Subject: ioctl_compat: handle FITRIM The FITRIM ioctl has the same arguments on 32-bit and 64-bit architectures, so we can add it to the list of compatible ioctls and drop it from compat_ioctl method of various filesystems. 
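[Note for readers: FITRIM can sit on the compatible list because struct fstrim_range is three fixed-width __u64 fields, so 32-bit and 64-bit callers already agree on its layout. A minimal userspace sketch of the call; the mount point is just an example path:]

/* Sketch: issue FITRIM against a mounted filesystem. */
#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start  = 0,
		.len    = ULLONG_MAX,	/* whole filesystem */
		.minlen = 0,
	};
	int fd = open("/mnt", O_RDONLY);	/* example mount point */

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}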
Signed-off-by: Mikulas Patocka Cc: Al Viro Cc: Ted Ts'o Signed-off-by: Linus Torvalds diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 6b8e2f0..48851f6 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -896,6 +896,7 @@ COMPATIBLE_IOCTL(FIGETBSZ) /* 'X' - originally XFS but some now in the VFS */ COMPATIBLE_IOCTL(FIFREEZE) COMPATIBLE_IOCTL(FITHAW) +COMPATIBLE_IOCTL(FITRIM) COMPATIBLE_IOCTL(KDGETKEYCODE) COMPATIBLE_IOCTL(KDSETKEYCODE) COMPATIBLE_IOCTL(KDGKBTYPE) diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 72afcc6..feef8a9 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -325,7 +325,6 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return rc; switch (cmd) { - case FITRIM: case FS_IOC32_GETFLAGS: case FS_IOC32_SETFLAGS: case FS_IOC32_GETVERSION: diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index cb84512..1346cfa 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -755,7 +755,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; } case EXT4_IOC_MOVE_EXT: - case FITRIM: case EXT4_IOC_RESIZE_FS: case EXT4_IOC_PRECACHE_EXTENTS: case EXT4_IOC_SET_ENCRYPTION_POLICY: diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c index 93a1232..8db8b7d 100644 --- a/fs/jfs/ioctl.c +++ b/fs/jfs/ioctl.c @@ -180,9 +180,6 @@ long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case JFS_IOC_SETFLAGS32: cmd = JFS_IOC_SETFLAGS; break; - case FITRIM: - cmd = FITRIM; - break; } return jfs_ioctl(filp, cmd, arg); } diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 9a20e51..aba4381 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -1369,7 +1369,6 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case NILFS_IOCTL_SYNC: case NILFS_IOCTL_RESIZE: case NILFS_IOCTL_SET_ALLOC_RANGE: - case FITRIM: break; default: return -ENOIOCTLCMD; diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 53e6c40..3cb097c 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -980,7 +980,6 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - case FITRIM: break; case OCFS2_IOC_REFLINK: if (copy_from_user(&args, argp, sizeof(args))) -- cgit v0.10.2 From f3efe3a07e0060dc5d6c41644733e49c7bd50a5b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 9 Jul 2015 16:23:57 -0300 Subject: perf tools: Fix the detached tarball wrt rbtree copy The python binding build process was still looking at the kernel rbtree.c file, so, when doing a in-tree build it would work, but when creating a tarball using tools/perf/MANIFEST as the contents list and then trying to build the resulting detached sources, it failed. Fix it by removing one level of indirection from rbtree.c in the tools/perf/util/python-ext-sources file. 
Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-8u83c2k5guyhxdlkaaqis8k4@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index e23ded4..de05e04 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -19,5 +19,5 @@ util/rblist.c util/stat.c util/strlist.c util/trace-event.c -../../lib/rbtree.c +../lib/rbtree.c util/string.c -- cgit v0.10.2 From 0aefc3590afcc9ecbe173fc01fccbda0869d2f0a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 9 Jul 2015 16:27:25 -0300 Subject: tools: Copy lib/hweight.c from the kernel sources Instead of accessing it directly, as it uses EXPORT_SYMBOL, that has no meaning in tools/perf and because we removed the stubs for it, i.e. we removed the tools/include/linux/export.h file. This fixes the build for the detached tarball sources cases and removes one more source of entanglement with the kernel sources. Cc: Adrian Hunter Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-oyqx541o7apa2cskjhcxi6nx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/lib/hweight.c b/tools/lib/hweight.c new file mode 100644 index 0000000..0b859b8 --- /dev/null +++ b/tools/lib/hweight.c @@ -0,0 +1,62 @@ +#include +#include + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. + */ + +unsigned int __sw_hweight32(unsigned int w) +{ +#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x55555555; + w = (w & 0x33333333) + ((w >> 2) & 0x33333333); + w = (w + (w >> 4)) & 0x0f0f0f0f; + return (w * 0x01010101) >> 24; +#else + unsigned int res = w - ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; +#endif +} + +unsigned int __sw_hweight16(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res + (res >> 4)) & 0x0F0F; + return (res + (res >> 8)) & 0x00FF; +} + +unsigned int __sw_hweight8(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res + (res >> 4)) & 0x0F; +} + +unsigned long __sw_hweight64(__u64 w) +{ +#if BITS_PER_LONG == 32 + return __sw_hweight32((unsigned int)(w >> 32)) + + __sw_hweight32((unsigned int)w); +#elif BITS_PER_LONG == 64 +#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x5555555555555555ul; + w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); + w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; + return (w * 0x0101010101010101ul) >> 56; +#else + __u64 res = w - ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + return (res + (res >> 32)) & 0x00000000000000FFul; +#endif +#endif +} diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 09dc0aa..d01a0aa 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h tools/arch/x86/include/asm/rmwcc.h tools/lib/traceevent 
tools/lib/api +tools/lib/hweight.c tools/lib/rbtree.c tools/lib/symbol/kallsyms.c tools/lib/symbol/kallsyms.h @@ -57,7 +58,6 @@ include/linux/perf_event.h include/linux/list.h include/linux/hash.h include/linux/stringify.h -lib/hweight.c include/linux/swab.h arch/*/include/asm/unistd*.h arch/*/include/uapi/asm/unistd*.h diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 601d114..d2d318c 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -143,6 +143,6 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) -$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE +$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index de05e04..0766d98 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -10,7 +10,7 @@ util/ctype.c util/evlist.c util/evsel.c util/cpumap.c -../../lib/hweight.c +../lib/hweight.c util/thread_map.c util/util.c util/xyarray.c -- cgit v0.10.2 From fc0a1f035cf15f704f4092da294963c57e94c1c0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 26 Jun 2015 14:12:10 +0200 Subject: i2c: I2C_MT65XX should depend on HAS_DMA If NO_DMA=y: ERROR: "dma_unmap_single" [drivers/i2c/busses/i2c-mt65xx.ko] undefined! ERROR: "dma_mapping_error" [drivers/i2c/busses/i2c-mt65xx.ko] undefined! ERROR: "dma_map_single" [drivers/i2c/busses/i2c-mt65xx.ko] undefined! Add a dependency on HAS_DMA to fix this. Signed-off-by: Geert Uytterhoeven Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 35ac237..577d58d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -633,6 +633,7 @@ config I2C_MPC config I2C_MT65XX tristate "MediaTek I2C adapter" depends on ARCH_MEDIATEK || COMPILE_TEST + depends on HAS_DMA help This selects the MediaTek(R) Integrated Inter Circuit bus driver for MT65xx and MT81xx. -- cgit v0.10.2 From 724948106ed236fc528c720ae12c79af7e2aea4e Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 30 Jun 2015 11:08:46 +0800 Subject: i2c: xgene-slimpro: Fix missing mbox_free_channel call in probe error path Free requested mailbox channel before return error. Signed-off-by: Axel Lin Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index dcca707..1c9cb65 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev) rc = i2c_add_adapter(adapter); if (rc) { dev_err(&pdev->dev, "Adapter registeration failed\n"); + mbox_free_channel(ctx->mbox_chan); return rc; } -- cgit v0.10.2 From eb8173e3d7b94664ecd3acf34942c9c9d6f6cb73 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 30 Jun 2015 11:41:58 +0800 Subject: i2c: jz4780: Fix return value if probe fails Current code returns 0 if fails to read clock-frequency DT property, fix it. Also add checking return value of clk_prepare_enable and propagate return value of devm_request_irq. 
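The fix follows the usual probe() discipline: every call that can fail produces a negative errno which is either returned directly or routed through a single unwind label that releases what probe has already claimed. Below is a condensed sketch of that shape, with hypothetical foo_i2c_* names standing in for the real driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver state, only what the sketch needs. */
struct foo_i2c {
	struct clk *clk;
	int irq;
};

static irqreturn_t foo_i2c_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_i2c_probe(struct platform_device *pdev)
{
	struct foo_i2c *i2c;
	u32 clk_freq;
	int ret;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	i2c->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2c->clk))
		return PTR_ERR(i2c->clk);

	ret = clk_prepare_enable(i2c->clk);	/* can fail: check it */
	if (ret)
		return ret;

	ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				   &clk_freq);
	if (ret) {
		/* propagate the errno from the failed call, not clk_freq */
		dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
		goto err_disable_clk;
	}

	i2c->irq = platform_get_irq(pdev, 0);
	if (i2c->irq < 0) {
		ret = i2c->irq;
		goto err_disable_clk;
	}

	ret = devm_request_irq(&pdev->dev, i2c->irq, foo_i2c_irq, 0,
			       dev_name(&pdev->dev), i2c);
	if (ret)	/* keep the real error code instead of inventing one */
		goto err_disable_clk;

	return 0;

err_disable_clk:
	clk_disable_unprepare(i2c->clk);	/* undo what probe already did */
	return ret;
}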
Signed-off-by: Axel Lin Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 19b2d68..f325663 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev) if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); - clk_prepare_enable(i2c->clk); + ret = clk_prepare_enable(i2c->clk); + if (ret) + return ret; - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &clk_freq)) { + ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &clk_freq); + if (ret) { dev_err(&pdev->dev, "clock-frequency not specified in DT"); - return clk_freq; + goto err; } i2c->speed = clk_freq / 1000; @@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev) i2c->irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); - if (ret) { - ret = -ENODEV; + if (ret) goto err; - } ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { -- cgit v0.10.2 From 4f001fd30145a6a8f72f9544c982cfd3dcb7c6df Mon Sep 17 00:00:00 2001 From: Pantelis Antoniou Date: Sat, 24 Jan 2015 09:16:29 +0200 Subject: i2c: Mark instantiated device nodes with OF_POPULATE Mark (and unmark) device nodes with the POPULATE flag as appropriate. This is required to avoid multi probing when using I2C and device overlays containing a mux. This patch is also more careful with the release of the adapter device which caused a deadlock with muxes, and does not break the build on !OF since the node flag accessors are not defined then. Signed-off-by: Pantelis Antoniou Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 069a41f..e6d4935 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1012,6 +1012,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device); */ void i2c_unregister_device(struct i2c_client *client) { + if (client->dev.of_node) + of_node_clear_flag(client->dev.of_node, OF_POPULATED); device_unregister(&client->dev); } EXPORT_SYMBOL_GPL(i2c_unregister_device); @@ -1320,8 +1322,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap) dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); - for_each_available_child_of_node(adap->dev.of_node, node) + for_each_available_child_of_node(adap->dev.of_node, node) { + if (of_node_test_and_set_flag(node, OF_POPULATED)) + continue; of_i2c_register_device(adap, node); + } } static int of_dev_node_match(struct device *dev, void *data) @@ -1853,6 +1858,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, if (adap == NULL) return NOTIFY_OK; /* not for us */ + if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { + put_device(&adap->dev); + return NOTIFY_OK; + } + client = of_i2c_register_device(adap, rd->dn); put_device(&adap->dev); @@ -1863,6 +1873,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, } break; case OF_RECONFIG_CHANGE_REMOVE: + /* already depopulated? */ + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + /* find our device by node */ client = of_find_i2c_device_by_node(rd->dn); if (client == NULL) -- cgit v0.10.2 From a27b5b97d6fe91f55058ad8ac28a8768700201ab Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Sun, 28 Jun 2015 15:16:57 +0200 Subject: hpfs: add fstrim support This patch adds support for fstrim to the HPFS filesystem. 
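For context, FITRIM is the same ioctl that fstrim(8) issues against any mounted filesystem, so the new hpfs handler is exercised from user space roughly as in the minimal, hypothetical caller below (requires CAP_SYS_ADMIN and a kernel with this support):

#include <fcntl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;	/* trim the whole filesystem */
	range.minlen = 0;	/* the fs may round this up internally */

	/* On success the kernel writes back the number of bytes trimmed. */
	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		close(fd);
		return 1;
	}

	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}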
Signed-off-by: Mikulas Patocka Signed-off-by: Linus Torvalds diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c index f005046..d6a4b55 100644 --- a/fs/hpfs/alloc.c +++ b/fs/hpfs/alloc.c @@ -484,3 +484,98 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a a->btree.first_free = cpu_to_le16(8); return a; } + +static unsigned find_run(__le32 *bmp, unsigned *idx) +{ + unsigned len; + while (tstbits(bmp, *idx, 1)) { + (*idx)++; + if (unlikely(*idx >= 0x4000)) + return 0; + } + len = 1; + while (!tstbits(bmp, *idx + len, 1)) + len++; + return len; +} + +static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result) +{ + int err; + secno end; + if (fatal_signal_pending(current)) + return -EINTR; + end = start + len; + if (start < limit_start) + start = limit_start; + if (end > limit_end) + end = limit_end; + if (start >= end) + return 0; + if (end - start < minlen) + return 0; + err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0); + if (err) + return err; + *result += end - start; + return 0; +} + +int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result) +{ + int err = 0; + struct hpfs_sb_info *sbi = hpfs_sb(s); + unsigned idx, len, start_bmp, end_bmp; + __le32 *bmp; + struct quad_buffer_head qbh; + + *result = 0; + if (!end || end > sbi->sb_fs_size) + end = sbi->sb_fs_size; + if (start >= sbi->sb_fs_size) + return 0; + if (minlen > 0x4000) + return 0; + if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) { + hpfs_lock(s); + if (s->s_flags & MS_RDONLY) { + err = -EROFS; + goto unlock_1; + } + if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { + err = -EIO; + goto unlock_1; + } + idx = 0; + while ((len = find_run(bmp, &idx)) && !err) { + err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); + idx += len; + } + hpfs_brelse4(&qbh); +unlock_1: + hpfs_unlock(s); + } + start_bmp = start >> 14; + end_bmp = (end + 0x3fff) >> 14; + while (start_bmp < end_bmp && !err) { + hpfs_lock(s); + if (s->s_flags & MS_RDONLY) { + err = -EROFS; + goto unlock_2; + } + if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) { + err = -EIO; + goto unlock_2; + } + idx = 0; + while ((len = find_run(bmp, &idx)) && !err) { + err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result); + idx += len; + } + hpfs_brelse4(&qbh); +unlock_2: + hpfs_unlock(s); + start_bmp++; + } + return err; +} diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 2a8e074..dc540bf 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -327,4 +327,5 @@ const struct file_operations hpfs_dir_ops = .iterate = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, + .unlocked_ioctl = hpfs_ioctl, }; diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 6d8cfe9..7ca28d6 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -203,6 +203,7 @@ const struct file_operations hpfs_file_ops = .release = hpfs_file_release, .fsync = hpfs_file_fsync, .splice_read = generic_file_splice_read, + .unlocked_ioctl = hpfs_ioctl, }; const struct inode_operations hpfs_file_iops = diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index bb04b58..c4867b5 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include "hpfs.h" @@ -200,6 +202,7 @@ void hpfs_free_dnode(struct super_block *, secno); struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct 
quad_buffer_head *); struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); +int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *); /* anode.c */ @@ -318,6 +321,7 @@ __printf(2, 3) void hpfs_error(struct super_block *, const char *, ...); int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *); unsigned hpfs_get_free_dnodes(struct super_block *); +long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg); /* * local time (HPFS) to GMT (Unix) diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 7cd00d3..037e3e5 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -196,6 +196,33 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } + +long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + switch (cmd) { + case FITRIM: { + struct fstrim_range range; + secno n_trimmed; + int r; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) + return -EFAULT; + r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed); + if (r) + return r; + range.len = (u64)n_trimmed << 9; + if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) + return -EFAULT; + return 0; + } + default: { + return -ENOIOCTLCMD; + } + } +} + + static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) -- cgit v0.10.2 From d7b04097c250e6322ba73d3b03337074afeeb314 Mon Sep 17 00:00:00 2001 From: Firo Yang Date: Thu, 23 Apr 2015 17:28:45 +0800 Subject: hpfs: Remove unessary cast Avoid a pointless kmem_cache_alloc() return value cast in fs/hpfs/super.c::hpfs_alloc_inode() Signed-off-by: Firo Yang Signed-off-by: Mikulas Patocka Signed-off-by: Linus Torvalds diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 037e3e5..0642516 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -228,7 +228,7 @@ static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; - ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); + ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; -- cgit v0.10.2 From ce657611baf902f14ae559ce4e0787ead6712067 Mon Sep 17 00:00:00 2001 From: Sanidhya Kashyap Date: Sat, 21 Mar 2015 12:57:50 -0400 Subject: hpfs: kstrdup() out of memory handling There is a possibility of nothing being allocated to the new_opts in case of memory pressure, therefore return ENOMEM for such case. 
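The general rule behind the fix is that kstrdup() is an allocator like any other and can return NULL under memory pressure, so the result must be checked before the copy is parsed and the failure pushed back to the caller. A tiny generic sketch of the pattern follows (hypothetical foo_remount(), not the hpfs code):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical remount path: duplicate the option string before parsing
 * it, and fail cleanly instead of dereferencing a NULL copy later. */
static int foo_remount(struct super_block *sb, int *flags, char *data)
{
	char *new_opts = kstrdup(data, GFP_KERNEL);

	/* kstrdup(NULL, ...) returns NULL by design, so only treat NULL as
	 * ENOMEM when there actually was something to copy. */
	if (data && !new_opts)
		return -ENOMEM;

	/* ... parse new_opts and apply the options ... */

	kfree(new_opts);
	return 0;
}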
Signed-off-by: Sanidhya Kashyap Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 0642516..cde044d 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -451,11 +451,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) int o; struct hpfs_sb_info *sbi = hpfs_sb(s); char *new_opts = kstrdup(data, GFP_KERNEL); - + + if (!new_opts) + return -ENOMEM; + sync_filesystem(s); *flags |= MS_NOATIME; - + hpfs_lock(s); uid = sbi->sb_uid; gid = sbi->sb_gid; umask = 0777 & ~sbi->sb_mode; -- cgit v0.10.2 From a28e4b2b18ccb90df402da3f21e1a83c9d4f8ec1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 26 Mar 2015 20:47:10 -0700 Subject: hpfs: hpfs_error: Remove static buffer, use vsprintf extension %pV instead Removing unnecessary static buffers is good. Use the vsprintf %pV extension instead. Signed-off-by: Joe Perches Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v2.6.36+ Signed-off-by: Linus Torvalds diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index cde044d..68a9bed 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s) } /* Filesystem error... */ -static char err_buf[1024]; - void hpfs_error(struct super_block *s, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - vsnprintf(err_buf, sizeof(err_buf), fmt, args); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_err("filesystem error: %pV", &vaf); + va_end(args); - pr_err("filesystem error: %s", err_buf); if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { pr_cont("; crashing the system because you wanted it\n"); -- cgit v0.10.2 From 838f13bf4b6737d4aec508558e45f81798fc2677 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 9 Jul 2015 16:39:47 -0400 Subject: blkcg: allow blkcg_pol_mutex to be grabbed from cgroup [file] methods blkcg_pol_mutex primarily protects the blkcg_policy array. It also protects cgroup file type [un]registration during policy addition / removal. This puts blkcg_pol_mutex outside cgroup internal synchronization and in turn makes it impossible to grab from blkcg's cgroup methods as that leads to cyclic dependency. Another problematic dependency arising from this is through cgroup interface file deactivation. Removing a cftype requires removing all files of the type which in turn involves draining all on-going invocations of the file methods. This means that an interface file implementation can't grab blkcg_pol_mutex as draining can lead to AA deadlock. blkcg_reset_stats() is already in this situation. It currently trylocks blkcg_pol_mutex and then unwinds and retries the whole operation on failure, which is cumbersome at best. It has a lengthy comment explaining how cgroup internal synchronization is involved and expected to be updated but as explained above this doesn't need cgroup internal locking to deadlock. It's a self-contained AA deadlock. The described circular dependencies can be easily broken by moving cftype [un]registration out of blkcg_pol_mutex and protect them with an outer mutex. This patch introduces blkcg_pol_register_mutex which wraps entire policy [un]registration including cftype operations and shrinks blkcg_pol_mutex critical section. This also makes the trylock dancing in blkcg_reset_stats() unnecessary. Removed. This patch is necessary for the following blkcg_policy_data allocation bug fixes. 
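The locking shape introduced here is a common one: keep a narrow inner mutex that only guards the shared table so it can safely be taken from cgroup file methods, and wrap the rare register/unregister path, including the cftype step that may drain those file methods, in a separate outer mutex that the file methods never take. A condensed, hypothetical sketch of that nesting (not the blk-cgroup code itself) is shown below:

#include <linux/mutex.h>

#define MAX_POLS 6

struct policy { int plid; };			/* hypothetical policy type */

static DEFINE_MUTEX(register_mutex);	/* outer: whole (un)register path */
static DEFINE_MUTEX(table_mutex);	/* inner: protects policy_table[] only */
static struct policy *policy_table[MAX_POLS];

/* Stand-in for the cftype registration step: adding interface files can
 * block until in-flight file methods drain, so it runs with only the
 * outer lock held. */
static void add_interface_files(struct policy *pol) { }

/* Slow path.  Nesting order is register_mutex -> table_mutex, and the
 * inner lock is dropped before the step that waits for file methods. */
int policy_register(struct policy *pol)
{
	int i, ret = -ENOSPC;

	mutex_lock(&register_mutex);
	mutex_lock(&table_mutex);
	for (i = 0; i < MAX_POLS; i++) {
		if (!policy_table[i]) {
			pol->plid = i;
			policy_table[i] = pol;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&table_mutex);

	if (!ret)
		add_interface_files(pol);
	mutex_unlock(&register_mutex);
	return ret;
}

/* File-method path (e.g. a "reset stats" write): it only needs the inner
 * lock, so it no longer participates in the circular dependency with
 * interface-file draining. */
void policy_reset_stats(void)
{
	mutex_lock(&table_mutex);
	/* ... walk policy_table[] under the narrow lock ... */
	mutex_unlock(&table_mutex);
}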
Signed-off-by: Tejun Heo Cc: Vivek Goyal Cc: Arianna Avanzini Signed-off-by: Jens Axboe diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5e2723f..2ff74ff 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -29,6 +29,14 @@ #define MAX_KEY_LEN 100 +/* + * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. + * blkcg_pol_register_mutex nests outside of it and synchronizes entire + * policy [un]register operations including cgroup file additions / + * removals. Putting cgroup file registration outside blkcg_pol_mutex + * allows grabbing it from cgroup callbacks. + */ +static DEFINE_MUTEX(blkcg_pol_register_mutex); static DEFINE_MUTEX(blkcg_pol_mutex); struct blkcg blkcg_root; @@ -453,20 +461,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css, struct blkcg_gq *blkg; int i; - /* - * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex - * which ends up putting cgroup's internal cgroup_tree_mutex under - * it; however, cgroup_tree_mutex is nested above cgroup file - * active protection and grabbing blkcg_pol_mutex from a cgroup - * file operation creates a possible circular dependency. cgroup - * internal locking is planned to go through further simplification - * and this issue should go away soon. For now, let's trylock - * blkcg_pol_mutex and restart the write on failure. - * - * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com - */ - if (!mutex_trylock(&blkcg_pol_mutex)) - return restart_syscall(); + mutex_lock(&blkcg_pol_mutex); spin_lock_irq(&blkcg->lock); /* @@ -1190,6 +1185,7 @@ int blkcg_policy_register(struct blkcg_policy *pol) if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) return -EINVAL; + mutex_lock(&blkcg_pol_register_mutex); mutex_lock(&blkcg_pol_mutex); /* find an empty slot */ @@ -1198,19 +1194,23 @@ int blkcg_policy_register(struct blkcg_policy *pol) if (!blkcg_policy[i]) break; if (i >= BLKCG_MAX_POLS) - goto out_unlock; + goto err_unlock; /* register and update blkgs */ pol->plid = i; blkcg_policy[i] = pol; + mutex_unlock(&blkcg_pol_mutex); /* everything is in place, add intf files for the new policy */ if (pol->cftypes) WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, pol->cftypes)); - ret = 0; -out_unlock: + mutex_unlock(&blkcg_pol_register_mutex); + return 0; + +err_unlock: mutex_unlock(&blkcg_pol_mutex); + mutex_unlock(&blkcg_pol_register_mutex); return ret; } EXPORT_SYMBOL_GPL(blkcg_policy_register); @@ -1223,7 +1223,7 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register); */ void blkcg_policy_unregister(struct blkcg_policy *pol) { - mutex_lock(&blkcg_pol_mutex); + mutex_lock(&blkcg_pol_register_mutex); if (WARN_ON(blkcg_policy[pol->plid] != pol)) goto out_unlock; @@ -1233,8 +1233,10 @@ void blkcg_policy_unregister(struct blkcg_policy *pol) cgroup_rm_cftypes(pol->cftypes); /* unregister and update blkgs */ + mutex_lock(&blkcg_pol_mutex); blkcg_policy[pol->plid] = NULL; -out_unlock: mutex_unlock(&blkcg_pol_mutex); +out_unlock: + mutex_unlock(&blkcg_pol_register_mutex); } EXPORT_SYMBOL_GPL(blkcg_policy_unregister); -- cgit v0.10.2 From 144232b34258c1fc19729e077c6fb161e30da07b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 9 Jul 2015 16:39:48 -0400 Subject: blkcg: blkcg_css_alloc() should grab blkcg_pol_mutex while iterating blkcg_policy[] An entry in blkcg_policy[] is stable while there are non-bypassing in-flight IOs on a request_queue which has the policy activated. 
This is why most derefs of blkcg_policy[] don't need explicit locking; however, blkcg_css_alloc() isn't invoked from IO path and thus doesn't have this protection and may race policies being added and removed. Fix it by adding explicit blkcg_pol_mutex protection around blkcg_policy[] iteration in blkcg_css_alloc(). Signed-off-by: Tejun Heo Fixes: e48453c386f3 ("block, cgroup: implement policy-specific per-blkcg data") Cc: Vivek Goyal Cc: Arianna Avanzini Signed-off-by: Jens Axboe diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 2ff74ff..05b893d 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -844,6 +844,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) goto free_blkcg; } + mutex_lock(&blkcg_pol_mutex); + for (i = 0; i < BLKCG_MAX_POLS ; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg_policy_data *cpd; @@ -860,6 +862,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) BUG_ON(blkcg->pd[i]); cpd = kzalloc(pol->cpd_size, GFP_KERNEL); if (!cpd) { + mutex_unlock(&blkcg_pol_mutex); ret = ERR_PTR(-ENOMEM); goto free_pd_blkcg; } @@ -868,6 +871,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) pol->cpd_init_fn(blkcg); } + mutex_unlock(&blkcg_pol_mutex); done: spin_lock_init(&blkcg->lock); INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); -- cgit v0.10.2 From 7876f930d0e78addc6bbdbba0d6c196a0788d545 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 9 Jul 2015 16:39:49 -0400 Subject: blkcg: implement all_blkcgs list Add all_blkcgs list goes through blkcg->all_blkcgs_node and is protected by blkcg_pol_mutex. This will be used to fix blkcg_policy_data allocation bug. Signed-off-by: Tejun Heo Cc: Vivek Goyal Cc: Arianna Avanzini Signed-off-by: Jens Axboe diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 05b893d..42ff436 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -46,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css; static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; +static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */ + static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { @@ -817,6 +819,10 @@ static void blkcg_css_free(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); + mutex_lock(&blkcg_pol_mutex); + list_del(&blkcg->all_blkcgs_node); + mutex_unlock(&blkcg_pol_mutex); + if (blkcg != &blkcg_root) { int i; @@ -833,6 +839,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) struct cgroup_subsys_state *ret; int i; + mutex_lock(&blkcg_pol_mutex); + if (!parent_css) { blkcg = &blkcg_root; goto done; @@ -844,8 +852,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) goto free_blkcg; } - mutex_lock(&blkcg_pol_mutex); - for (i = 0; i < BLKCG_MAX_POLS ; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg_policy_data *cpd; @@ -862,7 +868,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) BUG_ON(blkcg->pd[i]); cpd = kzalloc(pol->cpd_size, GFP_KERNEL); if (!cpd) { - mutex_unlock(&blkcg_pol_mutex); ret = ERR_PTR(-ENOMEM); goto free_pd_blkcg; } @@ -871,7 +876,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) pol->cpd_init_fn(blkcg); } - mutex_unlock(&blkcg_pol_mutex); done: spin_lock_init(&blkcg->lock); INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); @@ -879,14 +883,17 @@ done: #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&blkcg->cgwb_list); #endif + list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); + + mutex_unlock(&blkcg_pol_mutex); return &blkcg->css; 
free_pd_blkcg: for (i--; i >= 0; i--) kfree(blkcg->pd[i]); - free_blkcg: kfree(blkcg); + mutex_unlock(&blkcg_pol_mutex); return ret; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 58cfab8..cf3e7bc 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -47,6 +47,7 @@ struct blkcg { struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; + struct list_head all_blkcgs_node; #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif -- cgit v0.10.2 From 06b285bd11257bccc5a1b85a835507e33656aff2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 9 Jul 2015 16:39:50 -0400 Subject: blkcg: fix blkcg_policy_data allocation bug e48453c386f3 ("block, cgroup: implement policy-specific per-blkcg data") updated per-blkcg policy data to be dynamically allocated. When a policy is registered, its policy data aren't created. Instead, when the policy is activated on a queue, the policy data are allocated if there are blkg's (blkcg_gq's) which are attached to a given blkcg. This is buggy. Consider the following scenario. 1. A blkcg is created. No blkg's attached yet. 2. The policy is registered. No policy data is allocated. 3. The policy is activated on a queue. As the above blkcg doesn't have any blkg's, it won't allocate the matching blkcg_policy_data. 4. An IO is issued from the blkcg and blkg is created and the blkcg still doesn't have the matching policy data allocated. With cfq-iosched, this leads to an oops. It also doesn't free policy data on policy unregistration assuming that freeing of all policy data on blkcg destruction should take care of it; however, this also is incorrect. 1. A blkcg has policy data. 2. The policy gets unregistered but the policy data remains. 3. Another policy gets registered on the same slot. 4. Later, the new policy tries to allocate policy data on the previous blkcg but the slot is already occupied and gets skipped. The policy ends up operating on the policy data of the previous policy. There's no reason to manage blkcg_policy_data lazily. The reason we do lazy allocation of blkg's is that the number of all possible blkg's is the product of cgroups and block devices which can reach a surprising level. blkcg_policy_data is contrained by the number of cgroups and shouldn't be a problem. This patch makes blkcg_policy_data to be allocated for all existing blkcg's on policy registration and freed on unregistration and removes blkcg_policy_data handling from policy [de]activation paths. This makes that blkcg_policy_data are created and removed with the policy they belong to and fixes the above described problems. Signed-off-by: Tejun Heo Fixes: e48453c386f3 ("block, cgroup: implement policy-specific per-blkcg data") Cc: Vivek Goyal Cc: Arianna Avanzini Signed-off-by: Jens Axboe diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 42ff436..9da02c0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1048,10 +1048,8 @@ int blkcg_activate_policy(struct request_queue *q, const struct blkcg_policy *pol) { LIST_HEAD(pds); - LIST_HEAD(cpds); struct blkcg_gq *blkg; struct blkg_policy_data *pd, *nd; - struct blkcg_policy_data *cpd, *cnd; int cnt = 0, ret; if (blkcg_policy_enabled(q, pol)) @@ -1064,10 +1062,7 @@ int blkcg_activate_policy(struct request_queue *q, cnt++; spin_unlock_irq(q->queue_lock); - /* - * Allocate per-blkg and per-blkcg policy data - * for all existing blkgs. 
- */ + /* allocate per-blkg policy data for all existing blkgs */ while (cnt--) { pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); if (!pd) { @@ -1075,15 +1070,6 @@ int blkcg_activate_policy(struct request_queue *q, goto out_free; } list_add_tail(&pd->alloc_node, &pds); - - if (!pol->cpd_size) - continue; - cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node); - if (!cpd) { - ret = -ENOMEM; - goto out_free; - } - list_add_tail(&cpd->alloc_node, &cpds); } /* @@ -1093,32 +1079,17 @@ int blkcg_activate_policy(struct request_queue *q, spin_lock_irq(q->queue_lock); list_for_each_entry(blkg, &q->blkg_list, q_node) { - if (WARN_ON(list_empty(&pds)) || - WARN_ON(pol->cpd_size && list_empty(&cpds))) { + if (WARN_ON(list_empty(&pds))) { /* umm... this shouldn't happen, just abort */ ret = -ENOMEM; goto out_unlock; } - cpd = list_first_entry(&cpds, struct blkcg_policy_data, - alloc_node); - list_del_init(&cpd->alloc_node); pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); list_del_init(&pd->alloc_node); /* grab blkcg lock too while installing @pd on @blkg */ spin_lock(&blkg->blkcg->lock); - if (!pol->cpd_size) - goto no_cpd; - if (!blkg->blkcg->pd[pol->plid]) { - /* Per-policy per-blkcg data */ - blkg->blkcg->pd[pol->plid] = cpd; - cpd->plid = pol->plid; - pol->cpd_init_fn(blkg->blkcg); - } else { /* must free it as it has already been extracted */ - kfree(cpd); - } -no_cpd: blkg->pd[pol->plid] = pd; pd->blkg = blkg; pd->plid = pol->plid; @@ -1135,8 +1106,6 @@ out_free: blk_queue_bypass_end(q); list_for_each_entry_safe(pd, nd, &pds, alloc_node) kfree(pd); - list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node) - kfree(cpd); return ret; } EXPORT_SYMBOL_GPL(blkcg_activate_policy); @@ -1191,6 +1160,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); */ int blkcg_policy_register(struct blkcg_policy *pol) { + struct blkcg *blkcg; int i, ret; if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) @@ -1207,9 +1177,27 @@ int blkcg_policy_register(struct blkcg_policy *pol) if (i >= BLKCG_MAX_POLS) goto err_unlock; - /* register and update blkgs */ + /* register @pol */ pol->plid = i; - blkcg_policy[i] = pol; + blkcg_policy[pol->plid] = pol; + + /* allocate and install cpd's */ + if (pol->cpd_size) { + list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { + struct blkcg_policy_data *cpd; + + cpd = kzalloc(pol->cpd_size, GFP_KERNEL); + if (!cpd) { + mutex_unlock(&blkcg_pol_mutex); + goto err_free_cpds; + } + + blkcg->pd[pol->plid] = cpd; + cpd->plid = pol->plid; + pol->cpd_init_fn(blkcg); + } + } + mutex_unlock(&blkcg_pol_mutex); /* everything is in place, add intf files for the new policy */ @@ -1219,6 +1207,14 @@ int blkcg_policy_register(struct blkcg_policy *pol) mutex_unlock(&blkcg_pol_register_mutex); return 0; +err_free_cpds: + if (pol->cpd_size) { + list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { + kfree(blkcg->pd[pol->plid]); + blkcg->pd[pol->plid] = NULL; + } + } + blkcg_policy[pol->plid] = NULL; err_unlock: mutex_unlock(&blkcg_pol_mutex); mutex_unlock(&blkcg_pol_register_mutex); @@ -1234,6 +1230,8 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register); */ void blkcg_policy_unregister(struct blkcg_policy *pol) { + struct blkcg *blkcg; + mutex_lock(&blkcg_pol_register_mutex); if (WARN_ON(blkcg_policy[pol->plid] != pol)) @@ -1243,9 +1241,17 @@ void blkcg_policy_unregister(struct blkcg_policy *pol) if (pol->cftypes) cgroup_rm_cftypes(pol->cftypes); - /* unregister and update blkgs */ + /* remove cpds and unregister */ mutex_lock(&blkcg_pol_mutex); + + if (pol->cpd_size) { + 
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { + kfree(blkcg->pd[pol->plid]); + blkcg->pd[pol->plid] = NULL; + } + } blkcg_policy[pol->plid] = NULL; + mutex_unlock(&blkcg_pol_mutex); out_unlock: mutex_unlock(&blkcg_pol_register_mutex); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index cf3e7bc..1b62d76 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -89,18 +89,12 @@ struct blkg_policy_data { * Policies that need to keep per-blkcg data which is independent * from any request_queue associated to it must specify its size * with the cpd_size field of the blkcg_policy structure and - * embed a blkcg_policy_data in it. blkcg core allocates - * policy-specific per-blkcg structures lazily the first time - * they are actually needed, so it handles them together with - * blkgs. cpd_init() is invoked to let each policy handle - * per-blkcg data. + * embed a blkcg_policy_data in it. cpd_init() is invoked to let + * each policy handle per-blkcg data. */ struct blkcg_policy_data { /* the policy id this per-policy data belongs to */ int plid; - - /* used during policy activation */ - struct list_head alloc_node; }; /* association between a blk cgroup and a request queue */ -- cgit v0.10.2 From ae0afb4f5d44d17a0fd135ae000e2acf12c53617 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 9 Jul 2015 22:59:49 +0200 Subject: suspend-to-idle: Prevent RCU from complaining about tick_freeze() Put tick_freeze() under RCU_NONIDLE() to prevent RCU from complaining about suspicious RCU usage in idle by trace_suspend_resume() called from there. While at it, fix a comment related to another usage of RCU_NONIDLE() in enter_freeze_proper(). Signed-off-by: Rafael J. Wysocki Reviewed-by: Paul E. McKenney diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index e8e2775..48b7228 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static void enter_freeze_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { - tick_freeze(); + /* + * trace_suspend_resume() called by tick_freeze() for the last CPU + * executing it contains RCU usage regarded as invalid in the idle + * context, so tell RCU about that. + */ + RCU_NONIDLE(tick_freeze()); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping @@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, WARN_ON(!irqs_disabled()); /* * timekeeping_resume() that will be called by tick_unfreeze() for the - * last CPU executing it calls functions containing RCU read-side + * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. */ RCU_NONIDLE(tick_unfreeze()); -- cgit v0.10.2 From adb3505086def6d3e37013ec6cf8313cc719cbb6 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Wed, 8 Jul 2015 10:49:30 +0530 Subject: net: systemport: Use eth_hw_addr_random Use eth_hw_addr_random() instead of calling random_ether_addr(). Here, this change is setting addr_assign_type to NET_ADDR_RANDOM. The Coccinelle semantic patch that performs this transformation is as follows: @@ identifier a,b; @@ -random_ether_addr(a->b); +eth_hw_addr_random(a); Signed-off-by: Vaishali Thakkar Tested-by: Florian Fainelli Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 909ad7a..4566cdf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) macaddr = of_get_mac_address(dn); if (!macaddr || !is_valid_ether_addr(macaddr)) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); - random_ether_addr(dev->dev_addr); + eth_hw_addr_random(dev); } else { ether_addr_copy(dev->dev_addr, macaddr); } -- cgit v0.10.2 From 5a0266af10246c84a6c712f365788800c7e23fd4 Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Wed, 8 Jul 2015 14:35:22 +0530 Subject: drivers: net: cpsw: fix disabling of tx interrupt in rx isr In commit 'c03abd84634d ("net: ethernet: cpsw: don't requests IRQs we don't use")', common isr is split into tx and rx, but in rx isr tx interrupt is also disabledi in cpsw_disable_irq(). So tx interrupts are not handled during rx interrupts and rx napi completion and results in poor tx performance by 40Mbps. Fixing by disabling only rx interrupt in rx isr. Cc: Felipe Balbi Cc: # v4.0+ Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e778703..f335bf1 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -138,19 +138,6 @@ do { \ #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) -#define cpsw_enable_irq(priv) \ - do { \ - u32 i; \ - for (i = 0; i < priv->num_irqs; i++) \ - enable_irq(priv->irqs_table[i]); \ - } while (0) -#define cpsw_disable_irq(priv) \ - do { \ - u32 i; \ - for (i = 0; i < priv->num_irqs; i++) \ - disable_irq_nosync(priv->irqs_table[i]); \ - } while (0) - #define cpsw_slave_index(priv) \ ((priv->data.dual_emac) ? priv->emac_port : \ priv->data.active_slave) @@ -783,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) cpsw_intr_disable(priv); if (priv->irq_enabled == true) { - cpsw_disable_irq(priv); + disable_irq_nosync(priv->irqs_table[0]); priv->irq_enabled = false; } @@ -819,7 +806,7 @@ static int cpsw_poll(struct napi_struct *napi, int budget) prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { prim_cpsw->irq_enabled = true; - cpsw_enable_irq(priv); + enable_irq(priv->irqs_table[0]); } } @@ -1335,7 +1322,7 @@ static int cpsw_ndo_open(struct net_device *ndev) if (prim_cpsw->irq_enabled == false) { if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { prim_cpsw->irq_enabled = true; - cpsw_enable_irq(prim_cpsw); + enable_irq(prim_cpsw->irqs_table[0]); } } -- cgit v0.10.2 From 4a0e3e989d66bb7204b163d9cfaa7fa96d0f2023 Mon Sep 17 00:00:00 2001 From: Enrico Mioso Date: Wed, 8 Jul 2015 13:05:57 +0200 Subject: cdc_ncm: Add support for moving NDP to end of NCM frame NCM specs are not actually mandating a specific position in the frame for the NDP (Network Datagram Pointer). However, some Huawei devices will ignore our aggregates if it is not placed after the datagrams it points to. Add support for doing just this, in a per-device configurable way. While at it, update NCM subdrivers, disabling this functionality in all of them, except in huawei_cdc_ncm where it is enabled instead. We aren't making any distinction between different Huawei NCM devices, based on what the vendor driver does. 
Standard NCM devices are left unaffected: if they are compliant, they should be always usable, still stay on the safe side. This change has been tested and working with a Huawei E3131 device (which works regardless of NDP position), a Huawei E3531 (also working both ways) and an E3372 (which mandates NDP to be after indexed datagrams). V1->V2: - corrected wrong NDP acronym definition - fixed possible NULL pointer dereference - patch cleanup V2->V3: - Properly account for the NDP size when writing new packets to SKB Signed-off-by: Enrico Mioso Signed-off-by: David S. Miller diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index e4b7a47..efc18e0 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; - ret = cdc_ncm_bind_common(dev, intf, data_altsetting); + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); if (ret) goto err; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8067b8f..1991e4a 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) ctx->tx_curr_skb = NULL; } + kfree(ctx->delayed_ndp16); + kfree(ctx); } -int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) +int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) { const struct usb_cdc_union_desc *union_desc = NULL; struct cdc_ncm_ctx *ctx; @@ -855,6 +857,17 @@ advance: /* finish setting up the device specific data */ cdc_ncm_setup(dev); + /* Device-specific flags */ + ctx->drvflags = drvflags; + + /* Allocate the delayed NDP if needed. */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { + ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); + if (!ctx->delayed_ndp16) + goto error2; + dev_info(&intf->dev, "NDP will be placed at end of frame for this device."); + } + /* override ethtool_ops */ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; @@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) return -ENODEV; - /* The NCM data altsetting is fixed */ - ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM); + /* The NCM data altsetting is fixed, so we hard-coded it. + * Additionally, generic NCM devices are assumed to accept arbitrarily + * placed NDP. + */ + ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); /* * We should get an event when network connection is "connected" or @@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); + /* If NDP should be moved to the end of the NCM package, we can't follow the + * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and + * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 
+ */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + if (ctx->delayed_ndp16->dwSignature == sign) + return ctx->delayed_ndp16; + /* follow the chain of NDPs, looking for a match */ while (ndpoffset) { ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); @@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ } /* align new NDP */ - cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) + cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); /* verify that there is room for the NDP and the datagram (reserve) */ if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) @@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ nth16->wNdpIndex = cpu_to_le16(skb->len); /* push a new empty NDP */ - ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) + ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); + else + ndp16 = ctx->delayed_ndp16; + ndp16->dwSignature = sign; ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); return ndp16; @@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) struct sk_buff *skb_out; u16 n = 0, index, ndplen; u8 ready2send = 0; + u32 delayed_ndp_size; + + /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated + * accordingly. Otherwise, we should check here. + */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + delayed_ndp_size = ctx->max_ndp_size; + else + delayed_ndp_size = 0; /* if there is a remaining skb, it gets priority */ if (skb != NULL) { @@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); /* check if we had enough room left for both NDP and frame */ - if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { + if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) { if (n == 0) { /* won't fit, MTU problem? */ dev_kfree_skb_any(skb); @@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* variables will be reset at next call */ } + /* If requested, put NDP at end of frame. */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { + nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max); + nth16->wNdpIndex = cpu_to_le16(skb_out->len); + memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size); + + /* Zero out delayed NDP - signature checking will naturally fail. */ + ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size); + } + /* If collected data size is less or equal ctx->min_tx_pkt * bytes, we send buffers as it is. 
If we get more data, it * would be more efficient for USB HS mobile device with DMA diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 735f7da..2680a65 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev, struct usb_driver *subdriver = ERR_PTR(-ENODEV); int ret = -ENODEV; struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data; + int drvflags = 0; /* altsetting should always be 1 for NCM devices - so we hard-coded - * it here + * it here. Some huawei devices will need the NDP part of the NCM package to + * be at the end of the frame. */ - ret = cdc_ncm_bind_common(usbnet_dev, intf, 1); + drvflags |= CDC_NCM_FLAG_NDP_TO_END; + ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); if (ret) goto err; diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 7c9b484..1f6526c 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -80,6 +80,9 @@ #define CDC_NCM_TIMER_INTERVAL_MIN 5UL #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) +/* Driver flags */ +#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ + #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) @@ -103,9 +106,11 @@ struct cdc_ncm_ctx { spinlock_t mtx; atomic_t stop; + int drvflags; u32 timer_interval; u32 max_ndp_size; + struct usb_cdc_ncm_ndp16 *delayed_ndp16; u32 tx_timer_pending; u32 tx_curr_frame_num; @@ -133,7 +138,7 @@ struct cdc_ncm_ctx { }; u8 cdc_ncm_select_altsetting(struct usb_interface *intf); -int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); +int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); -- cgit v0.10.2 From fecdf8be2d91e04b0a9a4f79ff06499a36f5d14f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 8 Jul 2015 21:42:11 +0200 Subject: net: pktgen: fix race between pktgen_thread_worker() and kthread_stop() pktgen_thread_worker() is obviously racy, kthread_stop() can come between the kthread_should_stop() check and set_current_state(). Signed-off-by: Oleg Nesterov Reported-by: Jan Stancek Reported-by: Marcelo Leitner Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 05badbb..41489e3 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3572,8 +3572,10 @@ static int pktgen_thread_worker(void *arg) pktgen_rem_thread(t); /* Wait for kthread_stop */ - while (!kthread_should_stop()) { + for (;;) { set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; schedule(); } __set_current_state(TASK_RUNNING); -- cgit v0.10.2 From 1fbe4b46caca5b01b070af93d513031ffbcc480c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 8 Jul 2015 21:42:13 +0200 Subject: net: pktgen: kill the "Wait for kthread_stop" code in pktgen_thread_worker() pktgen_thread_worker() doesn't need to wait for kthread_stop(), it can simply exit. Just pktgen_create_thread() and pg_net_exit() should do get_task_struct()/put_task_struct(). 
kthread_stop(dead_thread) is fine. Signed-off-by: Oleg Nesterov Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 41489e3..1ebdf1c 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3571,15 +3571,6 @@ static int pktgen_thread_worker(void *arg) pr_debug("%s removing thread\n", t->tsk->comm); pktgen_rem_thread(t); - /* Wait for kthread_stop */ - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) - break; - schedule(); - } - __set_current_state(TASK_RUNNING); - return 0; } @@ -3771,6 +3762,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) } t->net = pn; + get_task_struct(p); wake_up_process(p); wait_for_completion(&t->start_done); @@ -3893,6 +3885,7 @@ static void __net_exit pg_net_exit(struct net *net) t = list_entry(q, struct pktgen_thread, th_list); list_del(&t->th_list); kthread_stop(t->tsk); + put_task_struct(t->tsk); kfree(t); } -- cgit v0.10.2 From 35afd02e30d6368073df604920c4ea7cddadf5d0 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 10 Jul 2015 01:36:27 +0200 Subject: cpufreq: Initialize the governor again while restoring policy When all CPUs of a policy are hot-unplugged, we EXIT the governor but don't mark policy->governor as NULL. This was done in order to keep last used governor's information intact in sysfs, while the CPUs are offline. But we also need to clear policy->governor when restoring the policy. Because policy->governor still points to the last governor while policy is restored, following sequence of event happens: - cpufreq_init_policy() called while restoring policy - find_governor() matches last_governor string for present governors and returns last used governor's pointer, say ondemand. policy->governor already has the same address, unless the governor was removed in between. - cpufreq_set_policy() is called with both old/new policies governor set as ondemand. - Because governors matched, we skip governor initialization and return after calling __cpufreq_governor(CPUFREQ_GOV_LIMITS). Because the governor wasn't initialized for this policy, it returned -EBUSY. - cpufreq_init_policy() exits the policy on this error, but doesn't destroy it properly (should be fixed separately). - And so we enter a scenario where the policy isn't completely initialized but used. Fix this by setting policy->governor to NULL while restoring the policy. Reported-and-tested-by: Pi-Cheng Chen Reported-and-tested-by: "Jon Medhurst (Tixy)" Reported-and-tested-by: Steven Rostedt Fixes: 18bf3a124ef8 (cpufreq: Mark policy->governor = NULL for inactive policies) Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b612411..2c22e39 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1132,6 +1132,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) down_write(&policy->rwsem); policy->cpu = cpu; + policy->governor = NULL; up_write(&policy->rwsem); } -- cgit v0.10.2 From 5a31d594a9732a2fa2eb83b0c4dcba75da2dff5d Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 10 Jul 2015 01:43:27 +0200 Subject: cpufreq: Allow freq_table to be obtained for offline CPUs Users of freq table may want to access it for any CPU from policy->related_cpus mask. One such user is cpu-cooling layer. It gets a list of 'clip_cpus' (equivalent to policy->related_cpus) during registration and tries to get freq_table for the first CPU of this mask. 
If the CPU, for which it tries to fetch freq_table, is offline, cpufreq_frequency_get_table() fails. This happens because it relies on cpufreq_cpu_get_raw() for its functioning which returns policy only for online CPUs. The fix is to access the policy data structure for the given CPU directly (which also returns a valid policy for offline CPUs), but the policy itself has to be active (meaning that at least one CPU using it is online) for the frequency table to be returned. Because we will be using 'cpufreq_cpu_data' now, which is internal to the cpufreq core, move cpufreq_frequency_get_table() to cpufreq.c. Reported-and-tested-by: Pi-Cheng Chen Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2c22e39..26063af 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(get_governor_parent_kobj); +struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) +{ + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + return policy && !policy_is_inactive(policy) ? + policy->freq_table : NULL; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); + static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index df14766..dfbbf981 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); -struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); - -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - return policy ? policy->freq_table : NULL; -} -EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); - MODULE_AUTHOR("Dominik Brodowski "); MODULE_DESCRIPTION("CPUfreq frequency table helpers"); MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 1fb01ca93a1348a1469b8777326cd7632483de77 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Wed, 8 Jul 2015 15:26:39 +0800 Subject: ACPI / PCI: Fix regressions caused by resource_size_t overflow with 32-bit kernel Zoltan Boszormenyi reported this regression: "There's a Realtek RTL8111/8168/8411 (PCI ID 10ec:8168, Subsystem ID 1565:230e) network chip on the mainboard. After the r8169 driver loaded the IRQs in the machine went berserk. Keyboard keypressed arrived with considerable latency and duplicated, so no real work was possible. The machine responded to the power button but didn't actually power down. It just stuck at the powering down message. I had to press the power button for 4 seconds to power it down. The computer is a POS machine with a big battery inside. Because of this, either ACPI or the Realtek chip kept the bad state and after rebooting, the network chip didn't even show up in lspci. Not even the PXE ROM announced itself during boot. I had to disconnect the battery to beat some sense back to the computer. The regression happens with 4.0.5, 4.1.0-rc8 and 4.1.0-final. 3.18.16 was good." The regression is caused by commit 593669c2ac0f (x86/PCI/ACPI: Use common ACPI resource interfaces to simplify implementation). 
Since commit 593669c2ac0f, x86 PCI ACPI host bridge driver validates ACPI resources by first converting an ACPI resource to a 'struct resource' structure and then applying checks against the converted resource structure. The 'start' and 'end' fields in 'struct resource' are defined to be type of resource_size_t, which may be 32 bits or 64 bits depending on CONFIG_PHYS_ADDR_T_64BIT. This may cause incorrect resource validation results with 32-bit kernels because 64-bit ACPI resource descriptors may get truncated when converting to 32-bit 'start' and 'end' fields in 'struct resource'. It eventually affects PCI resource allocation subsystem and makes some PCI devices and the system behave abnormally due to incorrect resource assignment. So enhance the ACPI resource parsing interfaces to ignore ACPI resource descriptors with address/offset above 4G when running in 32-bit mode. With the fix applied, the behavior of the machine was restored to how 3.18.16 worked, i.e. the memory range that is over 4GB is ignored again, and lspci -vvxxx shows that everything is at the same memory window as they were with 3.18.16. Reported-and-tested-by: Boszormenyi Zoltan Fixes: 593669c2ac0f (x86/PCI/ACPI: Use common ACPI resource interfaces to simplify implementation) Signed-off-by: Jiang Liu Cc: 4.0+ # 4.0+ Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 10561ce..e8d2817 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -194,6 +194,7 @@ static bool acpi_decode_space(struct resource_win *win, u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; bool wp = addr->info.mem.write_protect; u64 len = attr->address_length; + u64 start, end, offset = 0; struct resource *res = &win->res; /* @@ -205,9 +206,6 @@ static bool acpi_decode_space(struct resource_win *win, pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", addr->min_address_fixed, addr->max_address_fixed, len); - res->start = attr->minimum; - res->end = attr->maximum; - /* * For bridges that translate addresses across the bridge, * translation_offset is the offset that must be added to the @@ -215,12 +213,22 @@ static bool acpi_decode_space(struct resource_win *win, * primary side. Non-bridge devices must list 0 for all Address * Translation offset bits. 
*/ - if (addr->producer_consumer == ACPI_PRODUCER) { - res->start += attr->translation_offset; - res->end += attr->translation_offset; - } else if (attr->translation_offset) { + if (addr->producer_consumer == ACPI_PRODUCER) + offset = attr->translation_offset; + else if (attr->translation_offset) pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", attr->translation_offset); + start = attr->minimum + offset; + end = attr->maximum + offset; + + win->offset = offset; + res->start = start; + res->end = end; + if (sizeof(resource_size_t) < sizeof(u64) && + (offset != win->offset || start != res->start || end != res->end)) { + pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n", + attr->minimum, attr->maximum); + return false; } switch (addr->resource_type) { @@ -237,8 +245,6 @@ static bool acpi_decode_space(struct resource_win *win, return false; } - win->offset = attr->translation_offset; - if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; -- cgit v0.10.2 From 2c069a118fe1d80c47dca84e1561045fc7f3cc9e Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Fri, 10 Jul 2015 09:04:25 +1000 Subject: cxl: Check if afu is not null in cxl_slbia The pointer to an AFU in the adapter's list of AFUs can be null if we're in the process of removing AFUs. The afu_list_lock doesn't guard against this. Say we have 2 slices, and we're in the process of removing cxl. - We remove the AFUs in order (see cxl_remove). In cxl_remove_afu for AFU 0, we take the lock, set adapter->afu[0] = NULL, and release the lock. - Then we get an slbia. In cxl_slbia we take the lock, and set afu = adapter->afu[0], which is NULL. - Therefore our attempt to check afu->enabled will blow up. Therefore, check if afu is a null pointer before dereferencing it. Cc: stable@vger.kernel.org Signed-off-by: Daniel Axtens Acked-by: Michael Neuling Acked-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 833348e..4a164ab 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm) spin_lock(&adapter->afu_list_lock); for (slice = 0; slice < adapter->slices; slice++) { afu = adapter->afu[slice]; - if (!afu->enabled) + if (!afu || !afu->enabled) continue; rcu_read_lock(); idr_for_each_entry(&afu->contexts_idr, ctx, id) -- cgit v0.10.2 From fcc028c106e5750aa66dc43595b3f29f88801ff0 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 9 Jul 2015 22:21:20 +0900 Subject: net: axienet: Fix devm_ioremap_resource return value check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Value returned by devm_ioremap_resource() was checked for non-NULL but devm_ioremap_resource() returns IOMEM_ERR_PTR, not NULL. In case of error this could lead to dereference of ERR_PTR. Signed-off-by: Krzysztof Kozlowski Cc: Fixes: 46aa27df8853 ("net: axienet: Use devm_* calls") Reviewed-by: Sören Brinkmann Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4208dd7..d95f9aa 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev) /* Map device registers */ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); lp->regs = devm_ioremap_resource(&pdev->dev, ethres); - if (!lp->regs) { + if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); - ret = -ENOMEM; + ret = PTR_ERR(lp->regs); goto free_netdev; } @@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev) goto free_netdev; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); - if (!lp->dma_regs) { + if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); - ret = -ENOMEM; + ret = PTR_ERR(lp->dma_regs); goto free_netdev; } lp->rx_irq = irq_of_parse_and_map(np, 1); -- cgit v0.10.2 From a7d35f9d73e9ffa74a02304b817e579eec632f67 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 9 Jul 2015 18:56:07 +0200 Subject: bridge: fix potential crash in __netdev_pick_tx() Commit c29390c6dfee ("xps: must clear sender_cpu before forwarding") fixed an issue in normal forward path, caused by sender_cpu & napi_id skb fields being an union. Bridge is another point where skb can be forwarded, so we need the same cure. Bug triggers if packet was received on a NIC using skb_mark_napi_id() Fixes: 2bd82484bb4c ("xps: fix xps for stacked devices") Signed-off-by: Eric Dumazet Reported-by: Bob Liu Tested-by: Bob Liu Signed-off-by: David S. Miller diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index e97572b..0ff6e1b 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) } else { skb_push(skb, ETH_HLEN); br_drop_fake_rtable(skb); + skb_sender_cpu_clear(skb); dev_queue_xmit(skb); } -- cgit v0.10.2 From a833581e372a4adae2319d8dc379493edbc444e9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Jul 2015 19:23:38 +0200 Subject: x86, perf: Fix static_key bug in load_mm_cr4() Mikulas reported his K6-3 not booting. This is because the static_key API confusion struck and bit Andy, this wants to be static_key_false(). Reported-by: Mikulas Patocka Tested-by: Mikulas Patocka Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Andrea Arcangeli Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Kees Cook Cc: Linus Torvalds Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valdis Kletnieks Cc: Vince Weaver Cc: hillf.zj Fixes: a66734297f78 ("perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks") Link: http://lkml.kernel.org/r/20150709172338.GC19282@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 5e8daee..804a3a6 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available; static inline void load_mm_cr4(struct mm_struct *mm) { - if (static_key_true(&rdpmc_always_available) || + if (static_key_false(&rdpmc_always_available) || atomic_read(&mm->context.perf_rdpmc_allowed)) cr4_set_bits(X86_CR4_PCE); else -- cgit v0.10.2 From cccf34e9411c41b0cbfb41980fe55fc8e7c98fd2 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 10 Jul 2015 09:29:10 +0100 Subject: MIPS: c-r4k: Fix cache flushing for MT cores MT_SMP is not the only SMP option for MT cores. The MT_SMP option allows more than one VPE per core to appear as a secondary CPU in the system. Because of how CM works, it propagates the address-based cache ops to the secondary cores but not the index-based ones. Because of that, the code does not use IPIs to flush the L1 caches on secondary cores because the CM would have done that already. However, the CM functionality is independent of the type of SMP kernel so even in non-MT kernels, IPIs are not necessary. As a result of which, we change the conditional to depend on the CM presence. Moreover, since VPEs on the same core share the same L1 caches, there is no need to send an IPI on all of them so we calculate a suitable cpumask with only one VPE per core. Signed-off-by: Markos Chandras Cc: # 3.15+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10654/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 2b25d1b..16f1ea9 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -23,6 +23,7 @@ extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; +extern cpumask_t cpu_foreign_map; #define raw_smp_processor_id() (current_thread_info()->cpu) diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index faa46eb..d0744cc 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +/* + * A logcal cpu mask containing only one VPE per core to + * reduce the number of IPIs on large MT systems. + */ +cpumask_t cpu_foreign_map __read_mostly; +EXPORT_SYMBOL(cpu_foreign_map); + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; @@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu) } } +/* + * Calculate a new cpu_foreign_map mask whenever a + * new cpu appears or disappears. 
+ */ +static inline void calculate_cpu_foreign_map(void) +{ + int i, k, core_present; + cpumask_t temp_foreign_map; + + /* Re-calculate the mask */ + for_each_online_cpu(i) { + core_present = 0; + for_each_cpu(k, &temp_foreign_map) + if (cpu_data[i].package == cpu_data[k].package && + cpu_data[i].core == cpu_data[k].core) + core_present = 1; + if (!core_present) + cpumask_set_cpu(i, &temp_foreign_map); + } + + cpumask_copy(&cpu_foreign_map, &temp_foreign_map); +} + struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); @@ -146,6 +176,8 @@ asmlinkage void start_secondary(void) set_cpu_sibling_map(cpu); set_cpu_core_map(cpu); + calculate_cpu_foreign_map(); + cpumask_set_cpu(cpu, &cpu_callin_map); synchronise_count_slave(cpu); @@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void) static void stop_this_cpu(void *dummy) { /* - * Remove this CPU: + * Remove this CPU. Be a bit slow here and + * set the bits for every online CPU so we don't miss + * any IPI whilst taking this VPE down. */ + + cpumask_copy(&cpu_foreign_map, cpu_online_mask); + + /* Make it visible to every other CPU */ + smp_mb(); + set_cpu_online(smp_processor_id(), false); + calculate_cpu_foreign_map(); local_irq_disable(); while (1); } @@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); set_cpu_core_map(0); + calculate_cpu_foreign_map(); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7f660dc..a5974dd 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -37,6 +37,7 @@ #include /* for run_uncached() */ #include #include +#include /* * Special Variant of smp_call_function for use by cache functions: @@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) { preempt_disable(); -#ifndef CONFIG_MIPS_MT_SMP - smp_call_function(func, info, 1); -#endif + /* + * The Coherent Manager propagates address-based cache ops to other + * cores but not index-based ops. However, r4k_on_each_cpu is used + * in both cases so there is no easy way to tell what kind of op is + * executed to the other cores. The best we can probably do is + * to restrict that call when a CM is not present because both + * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops. + */ + if (!mips_cm_present()) + smp_call_function_many(&cpu_foreign_map, func, info, 1); func(info); preempt_enable(); } -- cgit v0.10.2 From 6249ecbbb75cd635025cc681fcf51fb8659edbab Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 17 Apr 2015 10:44:15 +0100 Subject: MIPS: Malta: Make GIC FDC IRQ workaround Malta specific Wider testing reveals that the Fast Debug Channel (FDC) interrupt is routed through the GIC just fine on Pistachio SoC, even though it contains interAptiv cores. Clearly the FDC interrupt routing problems previously observed on interAptiv and proAptiv cores are specific to the Malta FPGA bitstreams. Move the workaround for interAptiv and proAptiv out of gic_get_c0_fdc_int() in the GIC irqchip driver into Malta's get_c0_fdc_int() platform callback, to allow the Pistachio SoC to use the FDC interrupt. 
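Condensed from the diff that follows, the Malta callback ends up with this shape (all identifiers below are taken from the patch itself):

int get_c0_fdc_int(void)
{
	/* These cores claim the FDC is GIC-routable, but it is not actually
	 * wired up on the Malta bitstreams, so force the polling fallback. */
	switch (current_cpu_type()) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
		return -1;
	}

	if (cpu_has_veic)
		return -1;
	else if (gic_present)
		return gic_get_c0_fdc_int();
	else if (cp0_fdc_irq >= 0)
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	else
		return -1;
}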
Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Andrew Bresticker Cc: Thomas Gleixner Cc: Jason Cooper Cc: linux-mips@linux-mips.org Reviewed-by: Andrew Bresticker Cc: James Hartley Patchwork: http://patchwork.linux-mips.org/patch/9748/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 185e682..5625b19 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts) int get_c0_fdc_int(void) { - int mips_cpu_fdc_irq; + /* + * Some cores claim the FDC is routable through the GIC, but it doesn't + * actually seem to be connected for those Malta bitstreams. + */ + switch (current_cpu_type()) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + return -1; + }; if (cpu_has_veic) - mips_cpu_fdc_irq = -1; + return -1; else if (gic_present) - mips_cpu_fdc_irq = gic_get_c0_fdc_int(); + return gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) - mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; + return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; else - mips_cpu_fdc_irq = -1; - - return mips_cpu_fdc_irq; + return -1; } int get_c0_perfcount_int(void) diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 4400edd..b7d54d4 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void) return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; } - /* - * Some cores claim the FDC is routable but it doesn't actually seem to - * be connected. - */ - switch (current_cpu_type()) { - case CPU_INTERAPTIV: - case CPU_PROAPTIV: - return -1; - } - return irq_create_mapping(gic_irq_domain, GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -- cgit v0.10.2 From 6b5e741e9a834a8cf2d5b895319045ab17ad37fe Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 17 Apr 2015 10:44:16 +0100 Subject: MIPS: Pistachio: Support CDMM & Fast Debug Channel Implement the mips_cdmm_phys_base() platform callback to provide a default Common Device Memory Map (CDMM) physical base address for the Pistachio SoC. This allows the CDMM in each VPE to be configured and probed for devices, such as the Fast Debug Channel (FDC). The physical address chosen is just below the default CPC address, which appears to also be unallocated. The FDC IRQ is also usable on Pistachio, and is routed through the GIC, so implement the get_c0_fdc_int() platform callback using gic_get_c0_fdc_int(), so the FDC driver doesn't have to fall back to polling. 
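A quick sanity check on the two base addresses used in the diff below (the addresses come from the patch, the spacing is simple arithmetic):

#define DEFAULT_CPC_BASE_ADDR	0x1bde0000	/* existing Pistachio CPC base */
#define DEFAULT_CDMM_BASE_ADDR	0x1bdd0000	/* CDMM base added by this patch */

/* 0x1bde0000 - 0x1bdd0000 = 0x10000, so the CDMM window sits 64 KiB below
 * the default CPC base, in the gap the commit message describes as
 * apparently unallocated. */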
Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Andrew Bresticker Cc: James Hartley Cc: linux-mips@linux-mips.org Reviewed-by: Andrew Bresticker Patchwork: http://patchwork.linux-mips.org/patch/9749/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index d2dc836..8bd8ebb 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c @@ -63,13 +63,19 @@ void __init plat_mem_setup(void) plat_setup_iocoherency(); } -#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 +#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 +#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000 phys_addr_t mips_cpc_default_phys_base(void) { return DEFAULT_CPC_BASE_ADDR; } +phys_addr_t mips_cdmm_phys_base(void) +{ + return DEFAULT_CDMM_BASE_ADDR; +} + static void __init mips_nmi_setup(void) { void *base; diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 67889fc..7c73fcb 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -27,6 +27,11 @@ int get_c0_perfcount_int(void) return gic_get_c0_perfcount_int(); } +int get_c0_fdc_int(void) +{ + return gic_get_c0_fdc_int(); +} + void __init plat_time_init(void) { struct device_node *np; -- cgit v0.10.2 From 1e18ac7aeaec357048172695b1fbb461205b166f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 9 Jul 2015 10:40:41 +0100 Subject: MIPS: c-r4k: Extend way_string array The L2 cache in the I6400 core has 16 ways, so extend the way_string array to take such caches into account. [ralf@linux-mips.org: Other already supported CPUs are free to support more than 8 ways of cache as well.] Signed-off-by: Paul Burton Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10640/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index a5974dd..fbea443 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -945,7 +945,9 @@ static void b5k_instruction_hazard(void) } static char *way_string[] = { NULL, "direct mapped", "2-way", - "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" + "3-way", "4-way", "5-way", "6-way", "7-way", "8-way", + "9-way", "10-way", "11-way", "12-way", + "13-way", "14-way", "15-way", "16-way", }; static void probe_pcache(void) -- cgit v0.10.2 From 51d53674c3fff11e9231e66ca8616c65deadfb6c Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 9 Jul 2015 18:02:51 +0200 Subject: MIPS: O32: Use compat_sys_getsockopt. We were using the native syscall and that results in subtle breakage. This is the same issue as fixed in 077d0e65618f27b2199d622e12ada6d8f3dbd862 (MIPS: N32: Use compat getsockopt syscall) but that commit did fix it only for N32. Signed-off-by: Ralf Baechle Link: https://bugzilla.kernel.org/show_bug.cgi?id=100291 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 66d618b..f543ff4 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -400,7 +400,7 @@ EXPORT(sys32_call_table) PTR sys_connect /* 4170 */ PTR sys_getpeername PTR sys_getsockname - PTR sys_getsockopt + PTR compat_sys_getsockopt PTR sys_listen PTR compat_sys_recv /* 4175 */ PTR compat_sys_recvfrom -- cgit v0.10.2 From 553a59fc8f5d51c3824c0b7d4ca61e780157defa Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 19 May 2015 16:28:12 +0200 Subject: drm/imx: parallel-display: fix drm_panel support The parallel-display driver used an undocumented, non-standard property "fsl,panel" to optionally associate with a drm_panel device. 
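(For context, the association being replaced was a bare phandle lookup, roughly of the shape sketched below; this is a reconstruction for illustration: the property name comes from the commit message, while names such as imx_parallel_display and imxpd follow the driver's conventions and the exact old code may have differed.)

/* Reconstruction of the old, non-standard lookup removed by this patch. */
static void imx_pd_find_panel_old_way(struct imx_parallel_display *imxpd,
				      struct device_node *np)
{
	struct device_node *panel_node;

	panel_node = of_parse_phandle(np, "fsl,panel", 0);
	if (panel_node) {
		imxpd->panel = of_drm_find_panel(panel_node);
		of_node_put(panel_node);
	}
}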
This patch fixes the driver to use the same OF graph bindings as the LDB driver instead: parallel-display { compatible = "fsl,imx-parallel-display"; ... port@1 { reg = <1>; parallel_out: endpoint { remote-endpoint = <&panel_in>; }; }; }; panel { ... port { panel_in: endpoint { remote-endpoint = <&parallel_out>; }; }; }; Signed-off-by: Philipp Zabel Tested-by: Gary Bisson diff --git a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt index e75f0e5..971c3ee 100644 --- a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt @@ -65,8 +65,10 @@ Optional properties: - edid: verbatim EDID data block describing attached display. - ddc: phandle describing the i2c bus handling the display data channel -- port: A port node with endpoint definitions as defined in +- port@[0-1]: Port nodes with endpoint definitions as defined in Documentation/devicetree/bindings/media/video-interfaces.txt. + Port 0 is the input port connected to the IPU display interface, + port 1 is the output port connected to a panel. example: @@ -75,9 +77,29 @@ display@di0 { edid = [edid-data]; interface-pix-fmt = "rgb24"; - port { + port@0 { + reg = <0>; + display_in: endpoint { remote-endpoint = <&ipu_di0_disp0>; }; }; + + port@1 { + reg = <1>; + + display_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; +}; + +panel { + ... + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; }; diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 74a9ce4..b4deb9c 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -21,6 +21,7 @@ #include #include #include