From 21f79ae43fd2d498847ff82161027a62a1269926 Mon Sep 17 00:00:00 2001
From: Vineeth Remanan Pillai
Date: Tue, 7 Feb 2017 18:59:01 +0000
Subject: xen-netfront: Rework the fix for Rx stall during OOM and network stress

commit 538d92912d3190a1dd809233a0d57277459f37b2 upstream.

The commit 90c311b0eeea ("xen-netfront: Fix Rx stall during network
stress and OOM") caused the refill timer to be triggered on almost all
invocations of xennet_alloc_rx_buffers for certain workloads.

This reworks the fix by reverting to the old behaviour and taking the
skb allocation failure into consideration. The refill timer is now
triggered on insufficient requests or on skb allocation failure.

Signed-off-by: Vineeth Remanan Pillai
Fixes: 90c311b0eeea ("xen-netfront: Fix Rx stall during network stress and OOM")
Reported-by: Boris Ostrovsky
Reviewed-by: Boris Ostrovsky
Signed-off-by: David S. Miller
Cc: Eduardo Valentin
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 599cf50..cd442e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
     RING_IDX req_prod = queue->rx.req_prod_pvt;
     int notify;
+    int err = 0;
 
     if (unlikely(!netif_carrier_ok(queue->info->netdev)))
         return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
         struct xen_netif_rx_request *req;
 
         skb = xennet_alloc_one_rx_buffer(queue);
-        if (!skb)
+        if (!skb) {
+            err = -ENOMEM;
             break;
+        }
 
         id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
     queue->rx.req_prod_pvt = req_prod;
 
-    /* Not enough requests? Try again later. */
-    if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+    /* Try again later if there are not enough requests or skb allocation
+     * failed.
+     * Enough requests is quantified as the sum of newly created slots and
+     * the unconsumed slots at the backend.
+     */
+    if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+        unlikely(err)) {
         mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
         return;
     }
--
cgit v0.10.2


From 13550ffc9515d234c43d79ef4a87328e88182baa Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 10 Feb 2017 10:31:49 -0800
Subject: net_sched: fix error recovery at qdisc creation

commit 87b60cfacf9f17cf71933c6e33b66e68160af71d upstream.

Dmitry reported use-after-free bugs in the qdisc code [1].

The problem here is that ops->init() can return an error.
qdisc_create_dflt() then calls ops->destroy(), while qdisc_create()
does _not_ call it.

Four qdiscs chose to call their own ops->destroy(), assuming their
caller would not.

This patch makes sure qdisc_create() calls ops->destroy() and fixes
the four qdiscs to avoid a double free.
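[ Editor's note: a minimal, self-contained sketch of the ownership rule
  this patch establishes -- illustrative only, not the kernel code. On
  init failure the creator, not the init routine, performs the cleanup: ]

    struct ops_sketch {
        int  (*init)(void *priv);
        void (*destroy)(void *priv);    /* may be NULL, see the follow-up patch below */
    };

    static void *create_sketch(const struct ops_sketch *ops, void *priv)
    {
        if (ops->init(priv) == 0)
            return priv;                /* success */
        if (ops->destroy)
            ops->destroy(priv);         /* single cleanup point on failure */
        return NULL;                    /* init must not clean up after itself */
    }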
[1]
BUG: KASAN: use-after-free in mq_destroy+0x242/0x290 net/sched/sch_mq.c:33
at addr ffff8801d415d440
Read of size 8 by task syz-executor2/5030
CPU: 0 PID: 5030 Comm: syz-executor2 Not tainted 4.3.5-smp-DEV #119
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
 0000000000000046 ffff8801b435b870 ffffffff81bbbed4 ffff8801db000400
 ffff8801d415d440 ffff8801d415dc40 ffff8801c4988510 ffff8801b435b898
 ffffffff816682b1 ffff8801b435b928 ffff8801d415d440 ffff8801c49880c0
Call Trace:
 [] __dump_stack lib/dump_stack.c:15 [inline]
 [] dump_stack+0x6c/0x98 lib/dump_stack.c:51
 [] kasan_object_err+0x21/0x70 mm/kasan/report.c:158
 [] print_address_description mm/kasan/report.c:196 [inline]
 [] kasan_report_error+0x1b4/0x4b0 mm/kasan/report.c:285
 [] kasan_report mm/kasan/report.c:305 [inline]
 [] __asan_report_load8_noabort+0x43/0x50 mm/kasan/report.c:326
 [] mq_destroy+0x242/0x290 net/sched/sch_mq.c:33
 [] qdisc_destroy+0x12d/0x290 net/sched/sch_generic.c:953
 [] qdisc_create_dflt+0xf0/0x120 net/sched/sch_generic.c:848
 [] attach_default_qdiscs net/sched/sch_generic.c:1029 [inline]
 [] dev_activate+0x6ad/0x880 net/sched/sch_generic.c:1064
 [] __dev_open+0x221/0x320 net/core/dev.c:1403
 [] __dev_change_flags+0x15e/0x3e0 net/core/dev.c:6858
 [] dev_change_flags+0x8e/0x140 net/core/dev.c:6926
 [] dev_ifsioc+0x446/0x890 net/core/dev_ioctl.c:260
 [] dev_ioctl+0x1ba/0xb80 net/core/dev_ioctl.c:546
 [] sock_do_ioctl+0x99/0xb0 net/socket.c:879
 [] sock_ioctl+0x2a0/0x390 net/socket.c:958
 [] vfs_ioctl fs/ioctl.c:44 [inline]
 [] do_vfs_ioctl+0x8a8/0xe50 fs/ioctl.c:611
 [] SYSC_ioctl fs/ioctl.c:626 [inline]
 [] SyS_ioctl+0x94/0xc0 fs/ioctl.c:617
 [] entry_SYSCALL_64_fastpath+0x12/0x17

Signed-off-by: Eric Dumazet
Reported-by: Dmitry Vyukov
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 206dc24..4bc7a3b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1008,6 +1008,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
         return sch;
     }
+    /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+    ops->destroy(sch);
 err_out3:
     dev_put(dev);
     kfree((char *) sch - sch->padded);

diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index e3d0458..2fae8b5 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
             q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
                                           sizeof(u32));
             if (!q->hhf_arrays[i]) {
-                hhf_destroy(sch);
+                /* Note: hhf_destroy() will be called
+                 * by our caller.
+                 */
                 return -ENOMEM;
             }
         }
@@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
             q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
                                               BITS_PER_BYTE);
             if (!q->hhf_valid_bits[i]) {
-                hhf_destroy(sch);
+                /* Note: hhf_destroy() will be called
+                 * by our caller.
+                 */
                 return -ENOMEM;
             }
         }

diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 2bc8d7f..20b7f16 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
     /* pre-allocate qdiscs, attachment can't fail */
     priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                            GFP_KERNEL);
-    if (priv->qdiscs == NULL)
+    if (!priv->qdiscs)
         return -ENOMEM;
 
     for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
         qdisc = qdisc_create_dflt(dev_queue,
                                   get_default_qdisc_ops(dev, ntx),
                                   TC_H_MAKE(TC_H_MAJ(sch->handle),
                                             TC_H_MIN(ntx + 1)));
-        if (qdisc == NULL)
-            goto err;
+        if (!qdisc)
+            return -ENOMEM;
         priv->qdiscs[ntx] = qdisc;
         qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
     }
 
     sch->flags |= TCQ_F_MQROOT;
     return 0;
-
-err:
-    mq_destroy(sch);
-    return -ENOMEM;
 }
 
 static void mq_attach(struct Qdisc *sch)

diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b5c502c..9226834 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
     /* pre-allocate qdisc, attachment can't fail */
     priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                            GFP_KERNEL);
-    if (priv->qdiscs == NULL) {
-        err = -ENOMEM;
-        goto err;
-    }
+    if (!priv->qdiscs)
+        return -ENOMEM;
 
     for (i = 0; i < dev->num_tx_queues; i++) {
         dev_queue = netdev_get_tx_queue(dev, i);
@@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                                   get_default_qdisc_ops(dev, i),
                                   TC_H_MAKE(TC_H_MAJ(sch->handle),
                                             TC_H_MIN(i + 1)));
-        if (qdisc == NULL) {
-            err = -ENOMEM;
-            goto err;
-        }
+        if (!qdisc)
+            return -ENOMEM;
+
         priv->qdiscs[i] = qdisc;
         qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
     }
@@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
         priv->hw_owned = 1;
         err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
         if (err)
-            goto err;
+            return err;
     } else {
         netdev_set_num_tc(dev, qopt->num_tc);
         for (i = 0; i < qopt->num_tc; i++)
@@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 
     sch->flags |= TCQ_F_MQROOT;
     return 0;
-
-err:
-    mqprio_destroy(sch);
-    return err;
 }
 
 static void mqprio_attach(struct Qdisc *sch)

diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7f195ed..bc5e995 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
     q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
     q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
     if (!q->ht || !q->slots) {
-        sfq_destroy(sch);
+        /* Note: sfq_destroy() will be called by our caller */
         return -ENOMEM;
     }
+
     for (i = 0; i < q->divisor; i++)
         q->ht[i] = SFQ_EMPTY_SLOT;
--
cgit v0.10.2


From dc491cdd2c81588ca6110f07325e32535192bcf0 Mon Sep 17 00:00:00 2001
From: Gao Feng
Date: Wed, 28 Jun 2017 12:53:54 +0800
Subject: net: sched: Fix one possible panic when no destroy callback

commit c1a4872ebfb83b1af7144f7b29ac8c4b344a12a8 upstream.

When a qdisc fails to init, qdisc_create() invokes the destroy callback
to clean up, but there is no check that the callback actually exists.
This causes a panic for qdiscs such as codel and fq that do not define
one.

Take codel as an example: when a malicious user constructs an invalid
netlink msg, it causes codel_init->codel_change->nla_parse_nested to
fail.
The kernel then invokes the destroy callback directly, but qdisc codel
doesn't define one, and the result is a panic.

Now add a check for the destroy callback to avoid the possible panic.

Fixes: 87b60cfacf9f ("net_sched: fix error recovery at qdisc creation")
Signed-off-by: Gao Feng
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4bc7a3b..ff27a85 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1009,7 +1009,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
         return sch;
     }
     /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
-    ops->destroy(sch);
+    if (ops->destroy)
+        ops->destroy(sch);
 err_out3:
     dev_put(dev);
     kfree((char *) sch - sch->padded);
--
cgit v0.10.2


From 3f7e07c37ade390391a336e1f2e4c4d5a073c4a8 Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Tue, 20 Jun 2017 12:48:11 -0500
Subject: net/phy: micrel: configure interrupts after autoneg workaround

commit b866203d872d5deeafcecd25ea429d6748b5bd56 upstream.

The commit ("net/phy: micrel: Add workaround for bad autoneg") fixes an
autoneg failure case by resetting the hardware. This turns off
interrupts. Things will work themselves out if the phy polls, as it
will figure out its state during a poll. However, if the phy uses only
interrupts, the phy will stall, since interrupts are off. This patch
fixes the issue by calling config_intr after resetting the phy.

Fixes: d2fd719bcb0e ("net/phy: micrel: Add workaround for bad autoneg ")
Signed-off-by: Zach Brown
Reviewed-by: Andrew Lunn
Reviewed-by: Florian Fainelli
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fab56c9..2229188 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -622,6 +622,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
     if ((regval & 0xFF) == 0xFF) {
         phy_init_hw(phydev);
         phydev->link = 0;
+        if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+            phydev->drv->config_intr(phydev);
     }
 
     return 0;
--
cgit v0.10.2


From 0526ff300304882d57712e29bb47634dc1110b20 Mon Sep 17 00:00:00 2001
From: WANG Cong
Date: Wed, 21 Jun 2017 14:34:58 -0700
Subject: ipv6: avoid unregistering inet6_dev for loopback

commit 60abc0be96e00ca71bac083215ac91ad2e575096 upstream.

The per netns loopback_dev->ip6_ptr is unregistered and set to NULL
when its mtu is set to smaller than IPV6_MIN_MTU. This means we could
set rt->rt6i_idev to NULL after a rt6_uncached_list_flush_dev() and
then crash after another call.

In this case we should just bring its inet6_dev down, rather than
unregistering it; at least prior to commit 176c39af29bc ("netns: fix
addrconf_ifdown kernel panic") we always overrode the case for
loopback.

Thanks a lot to Andrey for finding a reliable reproducer.

Fixes: 176c39af29bc ("netns: fix addrconf_ifdown kernel panic")
Reported-by: Andrey Konovalov
Cc: Andrey Konovalov
Cc: Daniel Lezcano
Cc: David Ahern
Signed-off-by: Cong Wang
Acked-by: David Ahern
Tested-by: Andrey Konovalov
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
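[ Editor's note: the second argument of addrconf_ifdown() selects how far
  the interface is torn down; a sketch of the changed call sites, with
  the semantics described in the message above: ]

    /* how == 1: unregister the inet6_dev entirely (old behaviour)
     * how == 0: only bring it down, keep it registered
     */
    if (dev->mtu < IPV6_MIN_MTU)
        addrconf_ifdown(dev, dev != net->loopback_dev);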
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b6f4c42..4399beb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3316,6 +3316,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
     struct netdev_notifier_changeupper_info *info;
     struct inet6_dev *idev = __in6_dev_get(dev);
+    struct net *net = dev_net(dev);
     int run_pending = 0;
     int err;
 
@@ -3331,7 +3332,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
     case NETDEV_CHANGEMTU:
         /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
         if (dev->mtu < IPV6_MIN_MTU) {
-            addrconf_ifdown(dev, 1);
+            addrconf_ifdown(dev, dev != net->loopback_dev);
             break;
         }
 
@@ -3447,7 +3448,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
              * IPV6_MIN_MTU stop IPv6 on this interface.
              */
             if (dev->mtu < IPV6_MIN_MTU)
-                addrconf_ifdown(dev, 1);
+                addrconf_ifdown(dev, dev != net->loopback_dev);
         }
         break;
--
cgit v0.10.2


From cf81b4abe7ac73eaa4c9d5600eaa1bedd61ff6a3 Mon Sep 17 00:00:00 2001
From: Richard Cochran
Date: Fri, 23 Jun 2017 17:51:31 +0200
Subject: net: dp83640: Avoid NULL pointer dereference.

commit db9d8b29d19d2801793e4419f4c6272bf8951c62 upstream.

The function, skb_complete_tx_timestamp(), used to allow passing in a
NULL pointer for the time stamps, but that was changed in commit
62bccb8cdb69051b95a55ab0c489e3cab261c8ef ("net-timestamp: Make the
clone operation stand-alone from phy timestamping"), and the existing
call sites, all of which are in the dp83640 driver, were fixed up.

Even though the kernel-doc was subsequently updated in commit
7a76a021cd5a292be875fbc616daf03eab1e6996 ("net-timestamp: Update
skb_complete_tx_timestamp comment"), a bug fix from Manfred Rudigier
still came into the driver using the old semantics. Manfred probably
derived that patch from an older kernel version.

This fix should be applied to the stable trees as well.

Fixes: 81e8f2e930fe ("net: dp83640: Fix tx timestamp overflow handling.")
Signed-off-by: Richard Cochran
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 4865221..b88f7d6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
     if (overflow) {
         pr_debug("tx timestamp queue overflow, count %d\n", overflow);
         while (skb) {
-            skb_complete_tx_timestamp(skb, NULL);
+            kfree_skb(skb);
             skb = skb_dequeue(&dp83640->tx_queue);
         }
         return;
--
cgit v0.10.2


From ef13840028c8389966dad9ed505c48b8b78bd3f0 Mon Sep 17 00:00:00 2001
From: WANG Cong
Date: Sat, 24 Jun 2017 23:50:30 -0700
Subject: tcp: reset sk_rx_dst in tcp_disconnect()

commit d747a7a51b00984127a88113cdbbc26f91e9d815 upstream.

We have to reset the sk->sk_rx_dst when we disconnect a TCP
connection, because otherwise when we re-connect it this dst reference
is simply overridden in tcp_finish_connect().

This fixes a dst leak which leads to a loopback dev refcnt leak. It is
a long-standing bug; Kevin reported a very similar (if not the same)
bug before. Thanks to Andrei for providing such a reliable reproducer
which greatly narrows down the problem.

Fixes: 41063e9dd119 ("ipv4: Early TCP socket demux.")
Reported-by: Andrei Vagin
Reported-by: Kevin Xu
Signed-off-by: Cong Wang
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
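[ Editor's note: an illustrative timeline of the leak being closed,
  reconstructed from the message above: ]

    /* connect(fd, ...)        -> sk->sk_rx_dst = dst_A (holds a ref)
     * connect(fd, AF_UNSPEC)  -> tcp_disconnect(): dst_A kept (the bug)
     * connect(fd, ...)        -> tcp_finish_connect(): sk_rx_dst = dst_B;
     *                            dst_A's reference is never dropped.
     * Hence tcp_disconnect() must release it explicitly:
     */
    dst_release(sk->sk_rx_dst);
    sk->sk_rx_dst = NULL;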
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86fbf0f..1a4db27 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2300,6 +2300,8 @@ int tcp_disconnect(struct sock *sk, int flags)
     tcp_init_send_head(sk);
     memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
     __sk_dst_reset(sk);
+    dst_release(sk->sk_rx_dst);
+    sk->sk_rx_dst = NULL;
     tcp_saved_syn_free(tp);
 
     WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
--
cgit v0.10.2


From 3f04c32bf47a87b61a4df27faea7e2ab1c71388d Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 27 Jun 2017 07:02:20 -0700
Subject: net: prevent sign extension in dev_get_stats()

commit 6f64ec74515925cced6df4571638b5a099a49aae upstream.

Similar to the fix provided by Dominik Heidler in commit 9b3dc0a17d73
("l2tp: cast l2tp traffic counter to unsigned") we need to take care of
32bit kernels in dev_get_stats().

When using atomic_long_read(), we add a 'long' to a u64 and might
misinterpret the high order bit, unless we cast to unsigned.

Fixes: caf586e5f23ce ("net: add a core netdev->rx_dropped counter")
Fixes: 015f0688f57ca ("net: net: add a core netdev->tx_dropped counter")
Fixes: 6e7333d315a76 ("net: add rx_nohandler stat counter")
Signed-off-by: Eric Dumazet
Cc: Jarod Wilson
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/core/dev.c b/net/core/dev.c
index 97f8061..002fc2f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7563,9 +7563,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
     } else {
         netdev_stats_to_stats64(storage, &dev->stats);
     }
-    storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-    storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-    storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+    storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+    storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+    storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
     return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
--
cgit v0.10.2


From e5e5c0ec39dd380804c7c4be838da42392e3a44f Mon Sep 17 00:00:00 2001
From: Eduardo Valentin
Date: Tue, 11 Jul 2017 14:55:12 -0700
Subject: bridge: mdb: fix leak on complete_info ptr on fail path

commit 1bfb159673957644951ab0a8d2aec44b93ddb1ae upstream.

We currently get the following kmemleak report:

unreferenced object 0xffff8800039d9820 (size 32):
  comm "softirq", pid 0, jiffies 4295212383 (age 792.416s)
  hex dump (first 32 bytes):
    00 0c e0 03 00 88 ff ff ff 02 00 00 00 00 00 00  ................
    00 00 00 01 ff 11 00 02 86 dd 00 00 ff ff ff ff  ................
  backtrace:
    [] kmemleak_alloc+0x4a/0xa0
    [] kmem_cache_alloc_trace+0xb8/0x1c0
    [] __br_mdb_notify+0x2a3/0x300 [bridge]
    [] br_mdb_notify+0x6e/0x70 [bridge]
    [] br_multicast_add_group+0x109/0x150 [bridge]
    [] br_ip6_multicast_add_group+0x58/0x60 [bridge]
    [] br_multicast_rcv+0x1d5/0xdb0 [bridge]
    [] br_handle_frame_finish+0xcf/0x510 [bridge]
    [] br_nf_hook_thresh.part.27+0xb/0x10 [br_netfilter]
    [] br_nf_hook_thresh+0x48/0xb0 [br_netfilter]
    [] br_nf_pre_routing_finish_ipv6+0x109/0x1d0 [br_netfilter]
    [] br_nf_pre_routing_ipv6+0xd0/0x14c [br_netfilter]
    [] br_nf_pre_routing+0x197/0x3d0 [br_netfilter]
    [] nf_iterate+0x52/0x60
    [] nf_hook_slow+0x5c/0xb0
    [] br_handle_frame+0x1a4/0x2c0 [bridge]

This happens when switchdev_port_obj_add() fails. This patch frees the
complete_info object in the fail path.

Reviewed-by: Vallish Vaidyeshwara
Signed-off-by: Eduardo Valentin
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
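[ Editor's note: the general shape of the fix, as a sketch -- an object
  handed over to an asynchronous completion must be freed by the caller
  when the hand-over itself fails, otherwise nothing ever frees it: ]

    obj.complete_priv = info;
    obj.complete = completion_fn;   /* completion would free info */
    if (register_obj(&obj))         /* hand-over failed ...        */
        kfree(info);                /* ... so free it here         */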
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7dbc80d..6406010e 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
             __mdb_entry_to_br_ip(entry, &complete_info->ip);
             mdb.obj.complete_priv = complete_info;
             mdb.obj.complete = br_mdb_complete;
-            switchdev_port_obj_add(port_dev, &mdb.obj);
+            if (switchdev_port_obj_add(port_dev, &mdb.obj))
+                kfree(complete_info);
         }
     } else if (port_dev && type == RTM_DELMDB) {
         switchdev_port_obj_del(port_dev, &mdb.obj);
--
cgit v0.10.2


From bee8070548921f8de7b9aee7cfddd64bf8fa43e1 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Wed, 28 Jun 2017 14:44:21 +0300
Subject: rocker: move dereference before free

commit acb4b7df48b539cb391287921de57e4e5fae3460 upstream.

My static checker complains that ofdpa_neigh_del() can sometimes free
"found". It just makes sense to use it first before deleting it.

Fixes: ecf244f753e0 ("rocker: fix maybe-uninitialized warning")
Signed-off-by: Dan Carpenter
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca4613..b1af7cd 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
         *index = entry->index;
         resolved = false;
     } else if (removing) {
-        ofdpa_neigh_del(trans, found);
         *index = found->index;
+        ofdpa_neigh_del(trans, found);
     } else if (updating) {
         ofdpa_neigh_update(found, trans, NULL, false);
         resolved = !is_zero_ether_addr(found->eth_dst);
--
cgit v0.10.2


From cd5de9cb858d3bffa15bc11bfc8584d4838e14bd Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Thu, 29 Jun 2017 03:04:59 +0200
Subject: bpf: prevent leaking pointer via xadd on unprivileged

commit 6bdf6abc56b53103324dfd270a86580306e1a232 upstream.

Leaking kernel addresses to unprivileged users is generally
disallowed; for example, the verifier rejects the following:

  0: (b7) r0 = 0
  1: (18) r2 = 0xffff897e82304400
  3: (7b) *(u64 *)(r1 +48) = r2
  R2 leaks addr into ctx

Doing pointer arithmetic on them is also forbidden, so that they
don't turn into an unknown value and then get leaked out. However,
there's xadd as a special case, where we don't check the src reg for
being a pointer register, e.g. the following will pass:

  0: (b7) r0 = 0
  1: (7b) *(u64 *)(r1 +48) = r0
  2: (18) r2 = 0xffff897e82304400 ; map
  4: (db) lock *(u64 *)(r1 +48) += r2
  5: (95) exit

We could store the pointer into skb->cb, lose the type context, and
then read it out from there again to leak it eventually out of a map
value. Or more easily in a different variant, too:

  0: (bf) r6 = r1
  1: (7a) *(u64 *)(r10 -8) = 0
  2: (bf) r2 = r10
  3: (07) r2 += -8
  4: (18) r1 = 0x0
  6: (85) call bpf_map_lookup_elem#1
  7: (15) if r0 == 0x0 goto pc+3
  R0=map_value(ks=8,vs=8,id=0),min_value=0,max_value=0 R6=ctx R10=fp
  8: (b7) r3 = 0
  9: (7b) *(u64 *)(r0 +0) = r3
  10: (db) lock *(u64 *)(r0 +0) += r6
  11: (b7) r0 = 0
  12: (95) exit

  from 7 to 11: R0=inv,min_value=0,max_value=0 R6=ctx R10=fp
  11: (b7) r0 = 0
  12: (95) exit

Prevent this by checking the xadd src reg for pointer types. Also add
a couple of test cases related to this.
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Daniel Borkmann
Acked-by: Alexei Starovoitov
Acked-by: Martin KaFai Lau
Acked-by: Edward Cree
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f4..8ce679d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -885,6 +885,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
     if (err)
         return err;
 
+    if (is_pointer_value(env, insn->src_reg)) {
+        verbose("R%d leaks addr into mem\n", insn->src_reg);
+        return -EACCES;
+    }
+
     /* check whether atomic_add can read the memory */
     err = check_mem_access(env, insn->dst_reg, insn->off,
                            BPF_SIZE(insn->code), BPF_READ, -1);
--
cgit v0.10.2


From 067328078d14910fdc04ce5ce20ce18c5126ab32 Mon Sep 17 00:00:00 2001
From: Michal Kubeček
Date: Thu, 29 Jun 2017 11:13:36 +0200
Subject: net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish()

commit e44699d2c28067f69698ccb68dd3ddeacfebc434 upstream.

Recently I started seeing warnings about pages with refcount -1. The
problem was traced to packets being reused after their head was merged
into a GRO packet by skb_gro_receive(). While bisecting the issue
pointed to commit c21b48cc1bbf ("net: adjust skb->truesize in
___pskb_trim()") and I have never seen it on a kernel with it reverted,
I believe the real problem appeared earlier when the option to merge
the head frag in GRO was implemented.

Handling the NAPI_GRO_FREE_STOLEN_HEAD state was only added to the
GRO_MERGED_FREE branch of napi_skb_finish(), so if the driver uses
napi_gro_frags() and the head is merged (which in my case happens after
the skb_condense() call added by the commit mentioned above), the skb
is reused including the head that has been merged. As a result, we
release the page reference twice and eventually end up with a negative
page refcount.

To fix the problem, handle NAPI_GRO_FREE_STOLEN_HEAD in
napi_frags_finish() the same way it's done in napi_skb_finish().

Fixes: d7e8883cfcf4 ("net: make GRO aware of skb->head_frag")
Signed-off-by: Michal Kubecek
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
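[ Editor's note: the helper factored out by this patch, annotated --
  when GRO steals the head, the head's page now belongs to the merged
  packet, so only the sk_buff structure itself may be freed: ]

    static void napi_skb_free_stolen_head(struct sk_buff *skb)
    {
        skb_dst_drop(skb);                        /* drop route reference  */
        kmem_cache_free(skbuff_head_cache, skb);  /* free struct, not head */
    }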
diff --git a/net/core/dev.c b/net/core/dev.c
index 002fc2f..3d62af2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4641,6 +4641,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+    skb_dst_drop(skb);
+    kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
     switch (ret) {
@@ -4654,12 +4660,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
         break;
 
     case GRO_MERGED_FREE:
-        if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-            skb_dst_drop(skb);
-            kmem_cache_free(skbuff_head_cache, skb);
-        } else {
+        if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+            napi_skb_free_stolen_head(skb);
+        else
             __kfree_skb(skb);
-        }
         break;
 
     case GRO_HELD:
@@ -4729,10 +4733,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
         break;
 
     case GRO_DROP:
-    case GRO_MERGED_FREE:
         napi_reuse_skb(napi, skb);
         break;
 
+    case GRO_MERGED_FREE:
+        if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+            napi_skb_free_stolen_head(skb);
+        else
+            napi_reuse_skb(napi, skb);
+        break;
+
     case GRO_MERGED:
         break;
     }
--
cgit v0.10.2


From e20204dc2c74b896b3ef4f4328acb15f4abcd5cb Mon Sep 17 00:00:00 2001
From: Mohamad Haj Yahia
Date: Thu, 30 Mar 2017 17:09:00 +0300
Subject: net/mlx5: Cancel delayed recovery work when unloading the driver

commit 2a0165a034ac024b60cca49c61e46f4afa2e4d98 upstream.

Draining the health workqueue will ignore future health work, including
the work that reports hardware failure, and thus we can't enter the
error state. Instead, cancel only the recovery flow and make sure no
new recovery work will be scheduled.
Fixes: 5e44fca50470 ("net/mlx5: Only cancel recovery work when cleaning up device")
Signed-off-by: Mohamad Haj Yahia
Signed-off-by: Moshe Shemesh
Signed-off-by: Saeed Mahameed
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2115c8a..8beecd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {
 
 enum {
     MLX5_DROP_NEW_HEALTH_WORK,
+    MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
 static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
     mlx5_handle_bad_state(dev);
 
     spin_lock(&health->wq_lock);
-    if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+    if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
         schedule_delayed_work(&health->recover_work, recover_delay);
     else
         dev_err(&dev->pdev->dev,
@@ -328,6 +329,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
     init_timer(&health->timer);
     health->sick = 0;
     clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+    clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
     health->health = &dev->iseg->health;
     health->health_counter = &dev->iseg->health_counter;
 
@@ -350,11 +352,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 
     spin_lock(&health->wq_lock);
     set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+    set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
     spin_unlock(&health->wq_lock);
     cancel_delayed_work_sync(&health->recover_work);
     cancel_work_sync(&health->work);
 }
 
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+    struct mlx5_core_health *health = &dev->priv.health;
+
+    spin_lock(&health->wq_lock);
+    set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+    spin_unlock(&health->wq_lock);
+    cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
     struct mlx5_core_health *health = &dev->priv.health;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5bea0bf..b3309f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1169,7 +1169,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
     int err = 0;
 
     if (cleanup)
-        mlx5_drain_health_wq(dev);
+        mlx5_drain_health_recovery(dev);
 
     mutex_lock(&dev->intf_state_mutex);
     if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e1a903a..6a620e0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -788,6 +788,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                         struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
--
cgit v0.10.2


From a80a70a4822ca842c16b29c8d9e4336a95375c61 Mon Sep 17 00:00:00 2001
From: Derek Chickles
Date: Wed, 5 Jul 2017 11:59:27 -0700
Subject: liquidio: fix bug in soft reset failure detection

commit 05a6b4cae8c0cc1680c9dd33a97a49a13c0f01bc upstream.
The code that detects a failed soft reset of Octeon compares the wrong
value against the reset value of the Octeon SLI_SCRATCH_1 register,
resulting in an inability to detect a soft reset failure. Fix it by
using the correct value in the comparison, which is any non-zero value.

Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
Fixes: c0eab5b3580a ("liquidio: CN23XX firmware download")
Signed-off-by: Derek Chickles
Signed-off-by: Satanand Burla
Signed-off-by: Raghu Vatsavayi
Signed-off-by: Felix Manlunas
Reviewed-by: Leon Romanovsky
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a641..258bc8d 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -230,7 +230,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
     /* Wait for 100ms as Octeon resets. */
     mdelay(100);
 
-    if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+    if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
         dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
                 oct->octeon_id);
         return 1;

diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af8..cda32d5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -48,7 +48,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
     /* Wait for 10ms as Octeon resets. */
     mdelay(100);
 
-    if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+    if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
         dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
         return 1;
     }
--
cgit v0.10.2


From 743564306ee61a16fd9855528c6972653183a037 Mon Sep 17 00:00:00 2001
From: Gal Pressman
Date: Sun, 25 Jun 2017 16:46:25 +0300
Subject: net/mlx5e: Fix TX carrier errors report in get stats ndo

commit 8ff93de7668bd81bc8efa819d1184ebd48fae72d upstream.

The "symbol error during carrier" counter from PPCNT was mistakenly
reported as TX carrier errors in the get_stats ndo, although it is an
RX counter.

Fixes: 269e6b3af3bf ("net/mlx5e: Report additional error statistics in get stats ndo")
Signed-off-by: Gal Pressman
Signed-off-by: Saeed Mahameed
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea58234..9d37229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2671,8 +2671,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
         PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
     stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
     stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-    stats->tx_carrier_errors =
-        PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
     stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
                        stats->rx_frame_errors;
     stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
--
cgit v0.10.2


From d2c9512085784a6387d688f57b2671c6c15fc984 Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca
Date: Thu, 29 Jun 2017 16:56:54 +0200
Subject: ipv6: dad: don't remove dynamic addresses if link is down

commit ec8add2a4c9df723c94a863b8fcd6d93c472deed upstream.
Currently, when the link for $DEV is down, this command succeeds but
the address is removed immediately by DAD (1):

    ip addr add 1111::12/64 dev $DEV valid_lft 3600 preferred_lft 1800

In the same situation, this will succeed and not remove the address (2):

    ip addr add 1111::12/64 dev $DEV
    ip addr change 1111::12/64 dev $DEV valid_lft 3600 preferred_lft 1800

The comment in addrconf_dad_begin() when !IF_READY makes it look like
this is the intended behavior, but doesn't explain why:

     * If the device is not ready:
     * - keep it tentative if it is a permanent address.
     * - otherwise, kill it.

We clearly cannot prevent userspace from doing (2), but we can make (1)
work consistently with (2).

addrconf_dad_stop() is only called in two cases: if DAD failed, or to
skip DAD when the link is down. In that second case, the fix is to
avoid deleting the address, like we already do for permanent addresses.

Fixes: 3c21edbd1137 ("[IPV6]: Defer IPv6 device initialization until the link becomes ready.")
Signed-off-by: Sabrina Dubroca
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4399beb..b2cabda 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1875,15 +1875,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
     if (dad_failed)
         ifp->flags |= IFA_F_DADFAILED;
 
-    if (ifp->flags&IFA_F_PERMANENT) {
-        spin_lock_bh(&ifp->lock);
-        addrconf_del_dad_work(ifp);
-        ifp->flags |= IFA_F_TENTATIVE;
-        spin_unlock_bh(&ifp->lock);
-        if (dad_failed)
-            ipv6_ifa_notify(0, ifp);
-        in6_ifa_put(ifp);
-    } else if (ifp->flags&IFA_F_TEMPORARY) {
+    if (ifp->flags&IFA_F_TEMPORARY) {
         struct inet6_ifaddr *ifpub;
         spin_lock_bh(&ifp->lock);
         ifpub = ifp->ifpub;
@@ -1896,6 +1888,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
             spin_unlock_bh(&ifp->lock);
         }
         ipv6_del_addr(ifp);
+    } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+        spin_lock_bh(&ifp->lock);
+        addrconf_del_dad_work(ifp);
+        ifp->flags |= IFA_F_TENTATIVE;
+        spin_unlock_bh(&ifp->lock);
+        if (dad_failed)
+            ipv6_ifa_notify(0, ifp);
+        in6_ifa_put(ifp);
     } else {
         ipv6_del_addr(ifp);
     }
--
cgit v0.10.2


From beabc6032772d186976742438d6b1f290894ce44 Mon Sep 17 00:00:00 2001
From: Jiri Benc
Date: Sun, 2 Jul 2017 19:00:57 +0200
Subject: vxlan: fix hlist corruption

[ Upstream commit 69e766612c4bcb79e19cebed9eed61d4222c1d47 ]

It's not a good idea to add the same hlist_node to two different hash
lists. This leads to various hard to debug memory corruptions.

Fixes: b1be00a6c39f ("vxlan: support both IPv4 and IPv6 sockets in a single vxlan device")
Signed-off-by: Jiri Benc
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
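[ Editor's note: background sketch. hlist_add_head_rcu() rewrites
  node->next and node->pprev, so linking one hlist_node into a second
  list silently corrupts the first. The patch therefore gives the
  device one node per address family, each with a back-pointer for
  lookups: ]

    struct vxlan_dev_node {
        struct hlist_node hlist;   /* linked into exactly one VNI chain */
        struct vxlan_dev *vxlan;   /* back-pointer used by VNI lookup   */
    };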
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 963e533..983e941 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -227,15 +227,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
 
 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
 {
-    struct vxlan_dev *vxlan;
+    struct vxlan_dev_node *node;
 
     /* For flow based devices, map all packets to VNI 0 */
     if (vs->flags & VXLAN_F_COLLECT_METADATA)
         vni = 0;
 
-    hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
-        if (vxlan->default_dst.remote_vni == vni)
-            return vxlan;
+    hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+        if (node->vxlan->default_dst.remote_vni == vni)
+            return node->vxlan;
     }
 
     return NULL;
@@ -2309,17 +2309,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
     struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
     spin_lock(&vn->sock_lock);
-    hlist_del_init_rcu(&vxlan->hlist);
+    hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+    hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
     spin_unlock(&vn->sock_lock);
 }
 
-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+                             struct vxlan_dev_node *node)
 {
     struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
     __be32 vni = vxlan->default_dst.remote_vni;
 
+    node->vxlan = vxlan;
     spin_lock(&vn->sock_lock);
-    hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+    hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
     spin_unlock(&vn->sock_lock);
 }
 
@@ -2778,6 +2783,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 {
     struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
     struct vxlan_sock *vs = NULL;
+    struct vxlan_dev_node *node;
 
     if (!vxlan->cfg.no_share) {
         spin_lock(&vn->sock_lock);
@@ -2795,12 +2801,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
     if (IS_ERR(vs))
         return PTR_ERR(vs);
 #if IS_ENABLED(CONFIG_IPV6)
-    if (ipv6)
+    if (ipv6) {
         rcu_assign_pointer(vxlan->vn6_sock, vs);
-    else
+        node = &vxlan->hlist6;
+    } else
 #endif
+    {
         rcu_assign_pointer(vxlan->vn4_sock, vs);
-    vxlan_vs_add_dev(vs, vxlan);
+        node = &vxlan->hlist4;
+    }
+    vxlan_vs_add_dev(vs, vxlan, node);
     return 0;
 }

diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 308adc4..9fce47e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -221,9 +221,17 @@ struct vxlan_config {
     bool no_share;
 };
 
+struct vxlan_dev_node {
+    struct hlist_node hlist;
+    struct vxlan_dev *vxlan;
+};
+
 /* Pseudo network device */
 struct vxlan_dev {
-    struct hlist_node hlist;        /* vni hash table */
+    struct vxlan_dev_node hlist4;   /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+    struct vxlan_dev_node hlist6;   /* vni hash table for IPv6 socket */
+#endif
     struct list_head next;          /* vxlan's per namespace list */
     struct vxlan_sock __rcu *vn4_sock;  /* listening socket for IPv4 */
 #if IS_ENABLED(CONFIG_IPV6)
--
cgit v0.10.2


From 05e165e9bc45f5f5acdb2a072bd04c5840be8536 Mon Sep 17 00:00:00 2001
From: Alban Browaeys
Date: Mon, 3 Jul 2017 03:20:13 +0200
Subject: net: core: Fix slab-out-of-bounds in netdev_stats_to_stats64

commit 9af9959e142c274f4a30fefb71d97d2b028b337f upstream.

commit 9256645af098 ("net/core: relax BUILD_BUG_ON in
netdev_stats_to_stats64") made it possible to read beyond the size of
the source. Fix this by copying only the source size to the
destination, as the destination might be bigger than the source.
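[ Editor's note: the fixed copy, annotated -- the destination struct is
  at least as large as the source, so the copy must be bounded by the
  source size and the 64-bit-only tail zeroed separately: ]

    memcpy(stats64, netdev_stats, sizeof(*netdev_stats));   /* src-sized copy */
    memset((char *)stats64 + sizeof(*netdev_stats), 0,
           sizeof(*stats64) - sizeof(*netdev_stats));       /* zero the tail  */

The KASAN report for the overread follows: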
==================================================================
BUG: KASAN: slab-out-of-bounds in netdev_stats_to_stats64+0xe/0x30 at addr ffff8801be248b20
Read of size 192 by task VBoxNetAdpCtl/6734
CPU: 1 PID: 6734 Comm: VBoxNetAdpCtl Tainted: G O 4.11.4prahal+intel+ #118
Hardware name: LENOVO 20CDCTO1WW/20CDCTO1WW, BIOS GQET52WW (1.32 ) 05/04/2017
Call Trace:
 dump_stack+0x63/0x86
 kasan_object_err+0x1c/0x70
 kasan_report+0x270/0x520
 ? netdev_stats_to_stats64+0xe/0x30
 ? sched_clock_cpu+0x1b/0x190
 ? __module_address+0x3e/0x3b0
 ? unwind_next_frame+0x1ea/0xb00
 check_memory_region+0x13c/0x1a0
 memcpy+0x23/0x50
 netdev_stats_to_stats64+0xe/0x30
 dev_get_stats+0x1b9/0x230
 rtnl_fill_stats+0x44/0xc00
 ? nla_put+0xc6/0x130
 rtnl_fill_ifinfo+0xe9e/0x3700
 ? rtnl_fill_vfinfo+0xde0/0xde0
 ? sched_clock+0x9/0x10
 ? sched_clock+0x9/0x10
 ? sched_clock_local+0x120/0x130
 ? __module_address+0x3e/0x3b0
 ? unwind_next_frame+0x1ea/0xb00
 ? sched_clock+0x9/0x10
 ? sched_clock+0x9/0x10
 ? sched_clock_cpu+0x1b/0x190
 ? VBoxNetAdpLinuxIOCtlUnlocked+0x14b/0x280 [vboxnetadp]
 ? depot_save_stack+0x1d8/0x4a0
 ? depot_save_stack+0x34f/0x4a0
 ? depot_save_stack+0x34f/0x4a0
 ? save_stack+0xb1/0xd0
 ? save_stack_trace+0x16/0x20
 ? save_stack+0x46/0xd0
 ? kasan_slab_alloc+0x12/0x20
 ? __kmalloc_node_track_caller+0x10d/0x350
 ? __kmalloc_reserve.isra.36+0x2c/0xc0
 ? __alloc_skb+0xd0/0x560
 ? rtmsg_ifinfo_build_skb+0x61/0x120
 ? rtmsg_ifinfo.part.25+0x16/0xb0
 ? rtmsg_ifinfo+0x47/0x70
 ? register_netdev+0x15/0x30
 ? vboxNetAdpOsCreate+0xc0/0x1c0 [vboxnetadp]
 ? vboxNetAdpCreate+0x210/0x400 [vboxnetadp]
 ? VBoxNetAdpLinuxIOCtlUnlocked+0x14b/0x280 [vboxnetadp]
 ? do_vfs_ioctl+0x17f/0xff0
 ? SyS_ioctl+0x74/0x80
 ? do_syscall_64+0x182/0x390
 ? __alloc_skb+0xd0/0x560
 ? __alloc_skb+0xd0/0x560
 ? save_stack_trace+0x16/0x20
 ? init_object+0x64/0xa0
 ? ___slab_alloc+0x1ae/0x5c0
 ? ___slab_alloc+0x1ae/0x5c0
 ? __alloc_skb+0xd0/0x560
 ? sched_clock+0x9/0x10
 ? kasan_unpoison_shadow+0x35/0x50
 ? kasan_kmalloc+0xad/0xe0
 ? __kmalloc_node_track_caller+0x246/0x350
 ? __alloc_skb+0xd0/0x560
 ? kasan_unpoison_shadow+0x35/0x50
 ? memset+0x31/0x40
 ? __alloc_skb+0x31f/0x560
 ? napi_consume_skb+0x320/0x320
 ? br_get_link_af_size_filtered+0xb7/0x120 [bridge]
 ? if_nlmsg_size+0x440/0x630
 rtmsg_ifinfo_build_skb+0x83/0x120
 rtmsg_ifinfo.part.25+0x16/0xb0
 rtmsg_ifinfo+0x47/0x70
 register_netdevice+0xa2b/0xe50
 ? __kmalloc+0x171/0x2d0
 ? netdev_change_features+0x80/0x80
 register_netdev+0x15/0x30
 vboxNetAdpOsCreate+0xc0/0x1c0 [vboxnetadp]
 vboxNetAdpCreate+0x210/0x400 [vboxnetadp]
 ? vboxNetAdpComposeMACAddress+0x1d0/0x1d0 [vboxnetadp]
 ? kasan_check_write+0x14/0x20
 VBoxNetAdpLinuxIOCtlUnlocked+0x14b/0x280 [vboxnetadp]
 ? VBoxNetAdpLinuxOpen+0x20/0x20 [vboxnetadp]
 ? lock_acquire+0x11c/0x270
 ? __audit_syscall_entry+0x2fb/0x660
 do_vfs_ioctl+0x17f/0xff0
 ? __audit_syscall_entry+0x2fb/0x660
 ? ioctl_preallocate+0x1d0/0x1d0
 ? __audit_syscall_entry+0x2fb/0x660
 ? kmem_cache_free+0xb2/0x250
 ? syscall_trace_enter+0x537/0xd00
 ? exit_to_usermode_loop+0x100/0x100
 SyS_ioctl+0x74/0x80
 ? do_sys_open+0x350/0x350
 ? do_vfs_ioctl+0xff0/0xff0
 do_syscall_64+0x182/0x390
 entry_SYSCALL64_slow_path+0x25/0x25
RIP: 0033:0x7f7e39a1ae07
RSP: 002b:00007ffc6f04c6d8 EFLAGS: 00000206 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007ffc6f04c730 RCX: 00007f7e39a1ae07
RDX: 00007ffc6f04c730 RSI: 00000000c0207601 RDI: 0000000000000007
RBP: 00007ffc6f04c700 R08: 00007ffc6f04c780 R09: 0000000000000008
R10: 0000000000000541 R11: 0000000000000206 R12: 0000000000000007
R13: 00000000c0207601 R14: 00007ffc6f04c730 R15: 0000000000000012
Object at ffff8801be248008, in cache kmalloc-4096 size: 4096
Allocated:
PID = 6734
 save_stack_trace+0x16/0x20
 save_stack+0x46/0xd0
 kasan_kmalloc+0xad/0xe0
 __kmalloc+0x171/0x2d0
 alloc_netdev_mqs+0x8a7/0xbe0
 vboxNetAdpOsCreate+0x65/0x1c0 [vboxnetadp]
 vboxNetAdpCreate+0x210/0x400 [vboxnetadp]
 VBoxNetAdpLinuxIOCtlUnlocked+0x14b/0x280 [vboxnetadp]
 do_vfs_ioctl+0x17f/0xff0
 SyS_ioctl+0x74/0x80
 do_syscall_64+0x182/0x390
 return_from_SYSCALL_64+0x0/0x6a
Freed:
PID = 5600
 save_stack_trace+0x16/0x20
 save_stack+0x46/0xd0
 kasan_slab_free+0x73/0xc0
 kfree+0xe4/0x220
 kvfree+0x25/0x30
 single_release+0x74/0xb0
 __fput+0x265/0x6b0
 ____fput+0x9/0x10
 task_work_run+0xd5/0x150
 exit_to_usermode_loop+0xe2/0x100
 do_syscall_64+0x26c/0x390
 return_from_SYSCALL_64+0x0/0x6a
Memory state around the buggy address:
 ffff8801be248a80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 ffff8801be248b00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>ffff8801be248b80: 00 00 00 00 00 00 00 00 00 00 00 07 fc fc fc fc
                                                    ^
 ffff8801be248c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 ffff8801be248c80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================

Signed-off-by: Alban Browaeys
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/core/dev.c b/net/core/dev.c
index 3d62af2..c17952b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7531,7 +7531,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 {
 #if BITS_PER_LONG == 64
     BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-    memcpy(stats64, netdev_stats, sizeof(*stats64));
+    memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
     /* zero out counters that only exist in rtnl_link_stats64 */
     memset((char *)stats64 + sizeof(*netdev_stats), 0,
            sizeof(*stats64) - sizeof(*netdev_stats));
--
cgit v0.10.2


From 0bc26d1ca38d63cfbfaf5f4a7f8dacea27cf5109 Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Wed, 5 Jul 2017 14:41:46 -0600
Subject: net: ipv6: Compare lwstate in detecting duplicate nexthops
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit f06b7549b79e29a672336d4e134524373fb7a232 upstream.

Lennert reported a failure to add different mpls encaps in a multipath
route:

    $ ip -6 route add 1234::/16 \
        nexthop encap mpls 10 via fe80::1 dev ens3 \
        nexthop encap mpls 20 via fe80::1 dev ens3
    RTNETLINK answers: File exists

The problem is that the duplicate nexthop detection does not compare
lwtunnel configuration. Add it.

Fixes: 19e42e451506 ("ipv6: support for fib route lwtunnel encap attributes")
Signed-off-by: David Ahern
Reported-by: João Taveira Araújo
Reported-by: Lennert Buytenhek
Acked-by: Roopa Prabhu
Tested-by: Lennert Buytenhek
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
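[ Editor's note: the extended equality test added by the patch, spelled
  out -- two nexthops are duplicates only if device, inet6 device,
  gateway *and* lwtunnel encap state all match (lwtunnel_cmp_encap()
  returns 0 when the encaps are equal): ]

    static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
    {
        return a->dst.dev == b->dst.dev &&
               a->rt6i_idev == b->rt6i_idev &&
               ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
               !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
    }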
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2e347d4..2c43993 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -22,6 +22,7 @@ struct route_info {
 #include <net/flow.h>
 #include <net/ip6_fib.h>
 #include <net/sock.h>
+#include <net/lwtunnel.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <net/ndisc.h>
@@ -232,4 +233,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
     return daddr;
 }
 
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+    return a->dst.dev == b->dst.dev &&
+           a->rt6i_idev == b->rt6i_idev &&
+           ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+           !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
 #endif

diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 636d4d8..4345ee3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -771,10 +771,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
             goto next_iter;
         }
 
-        if (iter->dst.dev == rt->dst.dev &&
-            iter->rt6i_idev == rt->rt6i_idev &&
-            ipv6_addr_equal(&iter->rt6i_gateway,
-                            &rt->rt6i_gateway)) {
+        if (rt6_duplicate_nexthop(iter, rt)) {
             if (rt->rt6i_nsiblings)
                 rt->rt6i_nsiblings = 0;
             if (!(iter->rt6i_flags & RTF_EXPIRES))

diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b8b4753..5764a84 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2953,17 +2953,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
                                  struct rt6_info *rt, struct fib6_config *r_cfg)
 {
     struct rt6_nh *nh;
-    struct rt6_info *rtnh;
     int err = -EEXIST;
 
     list_for_each_entry(nh, rt6_nh_list, next) {
         /* check if rt6_info already exists */
-        rtnh = nh->rt6_info;
-
-        if (rtnh->dst.dev == rt->dst.dev &&
-            rtnh->rt6i_idev == rt->rt6i_idev &&
-            ipv6_addr_equal(&rtnh->rt6i_gateway,
-                            &rt->rt6i_gateway))
+        if (rt6_duplicate_nexthop(nh->rt6_info, rt))
             return err;
     }
--
cgit v0.10.2


From e6577f1ef3f80141333d83ba51b20295d794b8f8 Mon Sep 17 00:00:00 2001
From: Nikolay Aleksandrov
Date: Thu, 6 Jul 2017 15:24:40 +0300
Subject: vrf: fix bug_on triggered by rx when destroying a vrf

commit f630c38ef0d785101363a8992bbd4f302180f86f upstream.

When destroying a VRF device we clean up the slaves in its ndo_uninit()
function, but that causes packets to be switched (skb->dev == vrf being
destroyed) even though we're past the point where the VRF should be
receiving any packets while it is being dismantled. This causes a
BUG_ON to trigger if we have raw sockets (trace below).

The reason is that the inetdev of the VRF has been destroyed but we're
still sending packets up the stack with it, so let's free the slaves in
the dellink callback as David Ahern suggested.

Note that this fix doesn't prevent packets from going up when the VRF
device is admin down.

[   35.631371] ------------[ cut here ]------------
[   35.631603] kernel BUG at net/ipv4/fib_frontend.c:285!
[   35.631854] invalid opcode: 0000 [#1] SMP
[   35.631977] Modules linked in:
[   35.632081] CPU: 2 PID: 22 Comm: ksoftirqd/2 Not tainted 4.12.0-rc7+ #45
[   35.632247] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014
[   35.632477] task: ffff88005ad68000 task.stack: ffff88005ad64000
[   35.632632] RIP: 0010:fib_compute_spec_dst+0xfc/0x1ee
[   35.632769] RSP: 0018:ffff88005ad67978 EFLAGS: 00010202
[   35.632910] RAX: 0000000000000001 RBX: ffff880059a7f200 RCX: 0000000000000000
[   35.633084] RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffffffff82274af0
[   35.633256] RBP: ffff88005ad679f8 R08: 000000000001ef70 R09: 0000000000000046
[   35.633430] R10: ffff88005ad679f8 R11: ffff880037731cb0 R12: 0000000000000001
[   35.633603] R13: ffff8800599e3000 R14: 0000000000000000 R15: ffff8800599cb852
[   35.634114] FS:  0000000000000000(0000) GS:ffff88005d900000(0000) knlGS:0000000000000000
[   35.634306] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   35.634456] CR2: 00007f3563227095 CR3: 000000000201d000 CR4: 00000000000406e0
[   35.634632] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   35.634865] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   35.635055] Call Trace:
[   35.635271]  ? __lock_acquire+0xf0d/0x1117
[   35.635522]  ipv4_pktinfo_prepare+0x82/0x151
[   35.635831]  raw_rcv_skb+0x17/0x3c
[   35.636062]  raw_rcv+0xe5/0xf7
[   35.636287]  raw_local_deliver+0x169/0x1d9
[   35.636534]  ip_local_deliver_finish+0x87/0x1c4
[   35.636820]  ip_local_deliver+0x63/0x7f
[   35.637058]  ip_rcv_finish+0x340/0x3a1
[   35.637295]  ip_rcv+0x314/0x34a
[   35.637525]  __netif_receive_skb_core+0x49f/0x7c5
[   35.637780]  ? lock_acquire+0x13f/0x1d7
[   35.638018]  ? lock_acquire+0x15e/0x1d7
[   35.638259]  __netif_receive_skb+0x1e/0x94
[   35.638502]  ? __netif_receive_skb+0x1e/0x94
[   35.638748]  netif_receive_skb_internal+0x74/0x300
[   35.639002]  ? dev_gro_receive+0x2ed/0x411
[   35.639246]  ? lock_is_held_type+0xc4/0xd2
[   35.639491]  napi_gro_receive+0x105/0x1a0
[   35.639736]  receive_buf+0xc32/0xc74
[   35.639965]  ? detach_buf+0x67/0x153
[   35.640201]  ? virtqueue_get_buf_ctx+0x120/0x176
[   35.640453]  virtnet_poll+0x128/0x1c5
[   35.640690]  net_rx_action+0x103/0x343
[   35.640932]  __do_softirq+0x1c7/0x4b7
[   35.641171]  run_ksoftirqd+0x23/0x5c
[   35.641403]  smpboot_thread_fn+0x24f/0x26d
[   35.641646]  ? sort_range+0x22/0x22
[   35.641878]  kthread+0x129/0x131
[   35.642104]  ? __list_add+0x31/0x31
[   35.642335]  ? __list_add+0x31/0x31
[   35.642568]  ret_from_fork+0x2a/0x40
[   35.642804] Code: 05 bd 87 a3 00 01 e8 1f ef 98 ff 4d 85 f6 48 c7 c7 f0 4a 27 82 41 0f 94 c4 31 c9 31 d2 41 0f b6 f4 e8 04 71 a1 ff 45 84 e4 74 02 <0f> 0b 0f b7 93 c4 00 00 00 4d 8b a5 80 05 00 00 48 03 93 d0 00
[   35.644342] RIP: fib_compute_spec_dst+0xfc/0x1ee RSP: ffff88005ad67978

Fixes: 193125dbd8eb ("net: Introduce VRF device driver")
Reported-by: Chris Cormier
Signed-off-by: Nikolay Aleksandrov
Acked-by: David Ahern
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
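[ Editor's note: ordering sketch of the fix. Releasing the slaves from
  ->dellink() happens before the netdev is queued for unregistration,
  so no packets can still be steered into a VRF that is mid-teardown;
  ->ndo_uninit() would run too late for that. ]

    static void vrf_dellink(struct net_device *dev, struct list_head *head)
    {
        struct net_device *port_dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, port_dev, iter)
            vrf_del_slave(dev, port_dev);       /* detach slaves first */

        unregister_netdevice_queue(dev, head);  /* then tear down dev  */
    }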
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 642df93..578bd50 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -787,15 +787,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 static void vrf_dev_uninit(struct net_device *dev)
 {
     struct net_vrf *vrf = netdev_priv(dev);
-    struct net_device *port_dev;
-    struct list_head *iter;
 
     vrf_rtable_release(dev, vrf);
     vrf_rt6_release(dev, vrf);
 
-    netdev_for_each_lower_dev(dev, port_dev, iter)
-        vrf_del_slave(dev, port_dev);
-
     free_percpu(dev->dstats);
     dev->dstats = NULL;
 }
@@ -1232,6 +1227,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static void vrf_dellink(struct net_device *dev, struct list_head *head)
 {
+    struct net_device *port_dev;
+    struct list_head *iter;
+
+    netdev_for_each_lower_dev(dev, port_dev, iter)
+        vrf_del_slave(dev, port_dev);
+
     unregister_netdevice_queue(dev, head);
 }
--
cgit v0.10.2


From 181dda46a314a34515c444e9fe7e71298cd24839 Mon Sep 17 00:00:00 2001
From: Sowmini Varadhan
Date: Thu, 6 Jul 2017 08:15:06 -0700
Subject: rds: tcp: use sock_create_lite() to create the accept socket

commit 0933a578cd55b02dc80f219dc8f2efb17ec61c9a upstream.

There are two problems with calling sock_create_kern() from
rds_tcp_accept_one():

1. it sets up a new_sock->sk that is wasteful, because this ->sk is
   going to get replaced by inet_accept() in the subsequent ->accept()

2. the new_sock->sk is a leaked reference in sock_graft(), which
   expects to find a null parent->sk

Avoid these problems by calling sock_create_lite().

Signed-off-by: Sowmini Varadhan
Acked-by: Santosh Shilimkar
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman

diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index e0b23fb..525b624 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -129,7 +129,7 @@ int rds_tcp_accept_one(struct socket *sock)
     if (!sock) /* module unload or netns delete in progress */
         return -ENETUNREACH;
 
-    ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+    ret = sock_create_lite(sock->sk->sk_family,
                            sock->sk->sk_type, sock->sk->sk_protocol,
                            &new_sock);
     if (ret)
--
cgit v0.10.2


From 414848bba6ab91fe12ca8105b4652c4aa6f4b574 Mon Sep 17 00:00:00 2001
From: Arend van Spriel
Date: Fri, 7 Jul 2017 21:09:06 +0100
Subject: brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 8f44c9a41386729fea410e688959ddaa9d51be7c upstream.

The lower level nl80211 code in cfg80211 ensures that "len" is between
25 and NL80211_ATTR_FRAME (2304). We subtract DOT11_MGMT_HDR_LEN (24)
from "len", so that's a max of 2280. However, the action_frame->data[]
buffer is only BRCMF_FIL_ACTION_FRAME_SIZE (1800) bytes long, so this
memcpy() can overflow:

    memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
           le16_to_cpu(action_frame->len));

Fixes: 18e2f61db3b70 ("brcmfmac: P2P action frame tx.")
Reported-by: "freenerguo(郭大兴)"
Signed-off-by: Arend van Spriel
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
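[ Editor's note: the worked bound behind the new check, from the numbers
  in the message above -- nl80211 allows len up to 2304; minus the
  24-byte 802.11 management header that leaves up to 2280 bytes, while
  the destination buffer holds only 1800: ]

    /* BRCMF_FIL_ACTION_FRAME_SIZE (1800) + DOT11_MGMT_HDR_LEN (24) */
    if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
        err = -EINVAL;      /* reject before the memcpy() can overflow */
        goto exit;
    }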
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0f5dde1..9892df9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4928,6 +4928,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
         cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
                                 GFP_KERNEL);
     } else if (ieee80211_is_action(mgmt->frame_control)) {
+        if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+            brcmf_err("invalid action frame length\n");
+            err = -EINVAL;
+            goto exit;
+        }
         af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
         if (af_params == NULL) {
             brcmf_err("unable to allocate frame\n");
--
cgit v0.10.2


From fd325ddd58843dd518e2cd09994ef4691a1f4b5c Mon Sep 17 00:00:00 2001
From: Christophe Jaillet
Date: Wed, 21 Jun 2017 07:45:53 +0200
Subject: brcmfmac: Fix a memory leak in error handling path in 'brcmf_cfg80211_attach'

commit 57c00f2fac512837f8de73474ec1f54020015bae upstream.

If 'wiphy_new()' fails, we leak 'ops'. Add a new label in the error
handling path to free it in such a case.

Fixes: 5c22fb85102a7 ("brcmfmac: add wowl gtk rekeying offload support")
Signed-off-by: Christophe JAILLET
Signed-off-by: Kalle Valo
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 9892df9..1d4352e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6876,7 +6876,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
     wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
     if (!wiphy) {
         brcmf_err("Could not allocate wiphy device\n");
-        return NULL;
+        goto ops_out;
     }
     memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
     set_wiphy_dev(wiphy, busdev);
@@ -7010,6 +7010,7 @@ priv_out:
     ifp->vif = NULL;
 wiphy_out:
     brcmf_free_wiphy(wiphy);
+ops_out:
     kfree(ops);
     return NULL;
 }
--
cgit v0.10.2


From 5f69bb1d18c503a24a576509826f82aabca0a4bd Mon Sep 17 00:00:00 2001
From: "Peter S. Housel"
Date: Mon, 12 Jun 2017 11:46:22 +0100
Subject: brcmfmac: Fix glom_skb leak in brcmf_sdiod_recv_chain

commit 5ea59db8a375216e6c915c5586f556766673b5a7 upstream.

An earlier change to this function (3bdae810721b) fixed a leak in the
case of an unsuccessful call to brcmf_sdiod_buffrw(). However, the
glom_skb buffer, used for emulating a scattering read, is never used
or referenced after its contents are copied into the destination
buffers, and therefore always needs to be freed by the end of the
function.

Fixes: 3bdae810721b ("brcmfmac: Fix glob_skb leak in brcmf_sdiod_recv_chain")
Fixes: a413e39a38573 ("brcmfmac: fix brcmf_sdcard_recv_chain() for host without sg support")
Signed-off-by: Peter S. Housel
Signed-off-by: Arend van Spriel
Signed-off-by: Kalle Valo
Signed-off-by: Greg Kroah-Hartman
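[ Editor's note: the cleanup pattern the fix adopts, sketched --
  initialise the pointer to NULL and free unconditionally at the single
  exit label (this assumes, as the patch does, that
  brcmu_pkt_buf_free_skb() tolerates a NULL argument): ]

    struct sk_buff *glom_skb = NULL;
    /* ... allocate and use glom_skb; any failure does 'goto done' ... */
done:
    brcmu_pkt_buf_free_skb(glom_skb);   /* every path frees exactly once */
    return err;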
Housel Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 72139b5..746f8c9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -705,7 +705,7 @@ done: int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, struct sk_buff_head *pktq, uint totlen) { - struct sk_buff *glom_skb; + struct sk_buff *glom_skb = NULL; struct sk_buff *skb; u32 addr = sdiodev->sbwad; int err = 0; @@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, return -ENOMEM; err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, glom_skb); - if (err) { - brcmu_pkt_buf_free_skb(glom_skb); + if (err) goto done; - } skb_queue_walk(pktq, skb) { memcpy(skb->data, glom_skb->data, skb->len); @@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, pktq); done: + brcmu_pkt_buf_free_skb(glom_skb); return err; } -- cgit v0.10.2 From 6ae9dd281965423d95f63a5475c120f8af952a4f Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Wed, 12 Jul 2017 17:19:41 +0100 Subject: sfc: don't read beyond unicast address list [ Upstream commit c70d68150f71b84cea6997a53493e17bf18a54db ] If we have more than 32 unicast MAC addresses assigned to an interface we will read beyond the end of the address table in the driver when adding filters. The next 256 entries store multicast addresses, so we will end up attempting to insert duplicate filters, which is mostly harmless. If we add more than 288 unicast addresses we will then read past the multicast address table, which is likely to be more exciting. Fixes: 12fb0da45c9a ("sfc: clean fallbacks between promisc/normal in efx_ef10_filter_sync_rx_mode") Signed-off-by: Bert Kenward Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 00279da..c4ada72 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4399,12 +4399,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *uc; - int addr_count; unsigned int i; - addr_count = netdev_uc_count(net_dev); table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); - table->dev_uc_count = 1 + addr_count; ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; netdev_for_each_uc_addr(uc, net_dev) { @@ -4415,6 +4412,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); i++; } + + table->dev_uc_count = i; } static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) @@ -4422,11 +4421,10 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *mc; - unsigned int i, addr_count; + unsigned int i; table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); - addr_count = netdev_mc_count(net_dev); i = 0; netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { -- cgit v0.10.2 From b68aa7dff96efc59fee40e25b8044017de21161a Mon Sep 17 00:00:00 2001 From: Srinivas Dasari Date: Fri, 7 Jul 2017 01:43:41 +0300 Subject: cfg80211: Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE commit 8feb69c7bd89513be80eb19198d48f154b254021 upstream. Buffer overread may happen as nl80211_set_station() reads 4 bytes from the attribute NL80211_ATTR_LOCAL_MESH_POWER_MODE without validating the size of data received when userspace sends less than 4 bytes of data with NL80211_ATTR_LOCAL_MESH_POWER_MODE. Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE to avoid the buffer overread. Fixes: 3b1c5a5307f ("{cfg,nl}80211: mesh power mode primitives and userspace access") Signed-off-by: Srinivas Dasari Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e7a3068..c5d7d24 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -362,6 +362,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, -- cgit v0.10.2 From e471290603ee82cac555cd729d1eed1e2950665a Mon Sep 17 00:00:00 2001 From: Srinivas Dasari Date: Fri, 7 Jul 2017 01:43:42 +0300 Subject: cfg80211: Validate frequencies nested in NL80211_ATTR_SCAN_FREQUENCIES commit d7f13f7450369281a5d0ea463cc69890a15923ae upstream. validate_scan_freqs() retrieves frequencies from attributes nested in the attribute NL80211_ATTR_SCAN_FREQUENCIES with nla_get_u32(), which reads 4 bytes from each attribute without validating the size of data received. Attributes nested in NL80211_ATTR_SCAN_FREQUENCIES don't have an nla policy. Validate size of each attribute before parsing to avoid potential buffer overread. 
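[ Illustrative sketch, not the upstream hunk itself: without an nla_policy entry, each nested frequency attribute has to be length-checked by hand before nla_get_u32() dereferences four bytes of payload. The minimal form of such a check is: ]

        nla_for_each_nested(attr, freqs, tmp)
                if (nla_len(attr) != sizeof(u32))
                        return 0;       /* reject the malformed scan request */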
Fixes: 2a519311926 ("cfg80211/nl80211: scanning (and mac80211 update to use it)") Signed-off-by: Srinivas Dasari Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c5d7d24..fee7b1b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6327,6 +6327,10 @@ static int validate_scan_freqs(struct nlattr *freqs) struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; + nla_for_each_nested(attr1, freqs, tmp1) + if (nla_len(attr1) != sizeof(u32)) + return 0; + nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* -- cgit v0.10.2 From 7e9e9638afdfa122ba0dc314c9219dd90ed116b8 Mon Sep 17 00:00:00 2001 From: Srinivas Dasari Date: Fri, 7 Jul 2017 01:43:39 +0300 Subject: cfg80211: Check if PMKID attribute is of expected size commit 9361df14d1cbf966409d5d6f48bb334384fbe138 upstream. nla policy checks for only maximum length of the attribute data when the attribute type is NLA_BINARY. If userspace sends less data than specified, the wireless drivers may access illegal memory. When type is NLA_UNSPEC, nla policy check ensures that userspace sends minimum specified length number of bytes. Remove type assignment to NLA_BINARY from nla_policy of NL80211_ATTR_PMKID to make this NLA_UNSPEC and to make sure minimum WLAN_PMKID_LEN bytes are received from userspace with NL80211_ATTR_PMKID. Fixes: 67fbb16be69d ("nl80211: PMKSA caching support") Signed-off-by: Srinivas Dasari Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fee7b1b..6de4231 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -305,8 +305,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, - .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, -- cgit v0.10.2 From 6a90f81ab6398499e54927f5232b0a90d447b6de Mon Sep 17 00:00:00 2001 From: Srinivas Dasari Date: Fri, 7 Jul 2017 01:43:40 +0300 Subject: cfg80211: Check if NAN service ID is of expected size commit 0a27844ce86d039d74221dd56cd8c0349b146b63 upstream. nla policy checks for only maximum length of the attribute data when the attribute type is NLA_BINARY. If userspace sends less data than specified, cfg80211 may access illegal memory. When type is NLA_UNSPEC, nla policy check ensures that userspace sends minimum specified length number of bytes. Remove type assignment to NLA_BINARY from nla_policy of NL80211_NAN_FUNC_SERVICE_ID to make these NLA_UNSPEC and to make sure minimum NL80211_NAN_FUNC_SERVICE_ID_LEN bytes are received from userspace with NL80211_NAN_FUNC_SERVICE_ID. 
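[ Illustrative sketch, assuming the nla_policy semantics described above: when .type is left as NLA_UNSPEC, the policy check treats a non-zero .len as a required minimum payload size, whereas NLA_BINARY only enforces a maximum. A policy entry written as ]

        [EXAMPLE_ATTR] = { .len = EXAMPLE_LEN },        /* .type defaults to NLA_UNSPEC */

[ therefore rejects any userspace buffer shorter than EXAMPLE_LEN bytes before a driver can read past its end; EXAMPLE_ATTR and EXAMPLE_LEN are placeholder names. ]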
Fixes: a442b761b24 ("cfg80211: add add_nan_func / del_nan_func") Signed-off-by: Srinivas Dasari Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6de4231..e9e9bc5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -512,7 +512,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, - [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY, + [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG }, -- cgit v0.10.2 From 59613f80ec670685b83932350980194ea082c96a Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 30 Jun 2017 10:58:28 +0100 Subject: irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity commit 866d7c1b0a3c70387646c4e455e727a58c5d465a upstream. The GICv3 driver doesn't check if the target CPU for gic_set_affinity is valid before going ahead and making the changes. This triggers the following splat with KASAN: [ 141.189434] BUG: KASAN: global-out-of-bounds in gic_set_affinity+0x8c/0x140 [ 141.189704] Read of size 8 at addr ffff200009741d20 by task swapper/1/0 [ 141.189958] [ 141.190158] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.12.0-rc7 [ 141.190458] Hardware name: Foundation-v8A (DT) [ 141.190658] Call trace: [ 141.190908] [] dump_backtrace+0x0/0x328 [ 141.191224] [] show_stack+0x14/0x20 [ 141.191507] [] dump_stack+0xa4/0xc8 [ 141.191858] [] print_address_description+0x13c/0x250 [ 141.192219] [] kasan_report+0x210/0x300 [ 141.192547] [] __asan_load8+0x84/0x98 [ 141.192874] [] gic_set_affinity+0x8c/0x140 [ 141.193158] [] irq_do_set_affinity+0x54/0xb8 [ 141.193473] [] irq_set_affinity_locked+0x64/0xf0 [ 141.193828] [] __irq_set_affinity+0x48/0x78 [ 141.194158] [] arm_perf_starting_cpu+0x104/0x150 [ 141.194513] [] cpuhp_invoke_callback+0x17c/0x1f8 [ 141.194783] [] notify_cpu_starting+0x8c/0xb8 [ 141.195130] [] secondary_start_kernel+0x15c/0x200 [ 141.195390] [<0000000080db81b4>] 0x80db81b4 [ 141.195603] [ 141.195685] The buggy address belongs to the variable: [ 141.196012] __cpu_logical_map+0x200/0x220 [ 141.196176] [ 141.196315] Memory state around the buggy address: [ 141.196586] ffff200009741c00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 141.196913] ffff200009741c80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 141.197158] >ffff200009741d00: 00 00 00 00 fa fa fa fa 00 00 00 00 00 00 00 00 [ 141.197487] ^ [ 141.197758] ffff200009741d80: 00 00 00 00 00 00 00 00 fa fa fa fa 00 00 00 00 [ 141.198060] ffff200009741e00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 141.198358] ================================================================== [ 141.198609] Disabling lock debugging due to kernel taint [ 141.198961] CPU1: Booted secondary processor [410fd051] This patch adds the check to make sure the cpu is valid. 
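[ Illustrative sketch of the failure mode, not the upstream hunk: cpumask_any_and() returns nr_cpu_ids when the requested mask and the online mask do not intersect, so the chosen CPU must be range-checked before it is used to index __cpu_logical_map: ]

        cpu = cpumask_any_and(mask_val, cpu_online_mask);
        if (cpu >= nr_cpu_ids)          /* no usable target CPU */
                return -EINVAL;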
Fixes: commit 021f653791ad17e03f98 ("irqchip: gic-v3: Initial support for GICv3") Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 19d642e..24d388d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -646,6 +646,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, int enabled; u64 val; + if (cpu >= nr_cpu_ids) + return -EINVAL; + if (gic_irq_in_rdist(d)) return -EINVAL; -- cgit v0.10.2 From 008a1f2707488cf900bb20e29a8676cabeb6d405 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 2 Jul 2017 22:00:41 +0200 Subject: parisc: Report SIGSEGV instead of SIGBUS when running out of stack commit 247462316f85a9e0479445c1a4223950b68ffac1 upstream. When a process runs out of stack the parisc kernel wrongly faults with SIGBUS instead of the expected SIGSEGV signal. This example shows how the kernel faults: do_page_fault() command='a.out' type=15 address=0xfaac2000 in libc-2.24.so[f8308000+16c000] trap #15: Data TLB miss fault, vm_start = 0xfa2c2000, vm_end = 0xfaac2000 The vma->vm_end value is the first address which does not belong to the vma, so adjust the check to include vma->vm_end to the range for which to send the SIGSEGV signal. This patch unbreaks building the debian libsigsegv package. Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 040c48f..b6f3b5e 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -366,7 +366,7 @@ bad_area: case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ if (!vma || - address < vma->vm_start || address > vma->vm_end) { + address < vma->vm_start || address >= vma->vm_end) { si.si_signo = SIGSEGV; si.si_code = SEGV_MAPERR; break; -- cgit v0.10.2 From bf1e4dc3b4fb0afcbd628b63b3756ae27023ef03 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 12 Jun 2017 23:18:30 -0700 Subject: parisc: use compat_sys_keyctl() commit b0f94efd5aa8daa8a07d7601714c2573266cd4c9 upstream. Architectures with a compat syscall table must put compat_sys_keyctl() in it, not sys_keyctl(). The parisc architecture was not doing this; fix it. Signed-off-by: Eric Biggers Acked-by: Helge Deller Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 3cfef1d..8ec2ff8 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -361,7 +361,7 @@ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ - ENTRY_SAME(keyctl) + ENTRY_COMP(keyctl) ENTRY_SAME(ioprio_set) ENTRY_SAME(ioprio_get) ENTRY_SAME(inotify_init) -- cgit v0.10.2 From 8ed89cfc8692d7d2fa61a462619334107697aa99 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Mon, 3 Jul 2017 10:38:05 +0200 Subject: parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs commit 33f9e02495d15a061f0c94ef46f5103a2d0c20f3 upstream. 
Enabling parport pc driver on a B2600 (and probably other 64bit PARISC systems) produced following BUG: CPU: 0 PID: 1 Comm: swapper Not tainted 4.12.0-rc5-30198-g1132d5e #156 task: 000000009e050000 task.stack: 000000009e04c000 YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI PSW: 00001000000001101111111100001111 Not tainted r00-03 000000ff0806ff0f 000000009e04c990 0000000040871b78 000000009e04cac0 r04-07 0000000040c14de0 ffffffffffffffff 000000009e07f098 000000009d82d200 r08-11 000000009d82d210 0000000000000378 0000000000000000 0000000040c345e0 r12-15 0000000000000005 0000000040c345e0 0000000000000000 0000000040c9d5e0 r16-19 0000000040c345e0 00000000f00001c4 00000000f00001bc 0000000000000061 r20-23 000000009e04ce28 0000000000000010 0000000000000010 0000000040b89e40 r24-27 0000000000000003 0000000000ffffff 000000009d82d210 0000000040c14de0 r28-31 0000000000000000 000000009e04ca90 000000009e04cb40 0000000000000000 sr00-03 0000000000000000 0000000000000000 0000000000000000 0000000000000000 sr04-07 0000000000000000 0000000000000000 0000000000000000 0000000000000000 IASQ: 0000000000000000 0000000000000000 IAOQ: 00000000404aece0 00000000404aece4 IIR: 03ffe01f ISR: 0000000010340000 IOR: 000001781304cac8 CPU: 0 CR30: 000000009e04c000 CR31: 00000000e2976de2 ORIG_R28: 0000000000000200 IAOQ[0]: sba_dma_supported+0x80/0xd0 IAOQ[1]: sba_dma_supported+0x84/0xd0 RP(r2): parport_pc_probe_port+0x178/0x1200 Cause is a call to dma_coerce_mask_and_coherenet in parport_pc_probe_port, which PARISC DMA API doesn't handle very nicely. This commit gives back DMA_ERROR_CODE for DMA API calls, if device isn't capable of DMA transaction. Signed-off-by: Thomas Bogendoerfer Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 16e0246..cb7697d 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -20,6 +20,8 @@ ** flush/purge and allocate "regular" cacheable pages for everything. */ +#define DMA_ERROR_CODE (~(dma_addr_t)0) + #ifdef CONFIG_PA11 extern struct dma_map_ops pcxl_dma_ops; extern struct dma_map_ops pcx_dma_ops; @@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev) break; } } - BUG_ON(!dev->platform_data); return dev->platform_data; } - -#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu) - + +#define GET_IOC(dev) ({ \ + void *__pdata = parisc_walk_tree(dev); \ + __pdata ? 
HBA_DATA(__pdata)->iommu : NULL; \ +}) #ifdef CONFIG_IOMMU_CCIO struct parisc_device; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 3ed6238..c4953ec 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; BUG_ON(size <= 0); @@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long)iova, size); @@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return 0; DBG_RUN_SG("%s() START %d entries\n", __func__, nents); @@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN_SG("%s() START %d entries, %p,%x\n", __func__, nents, sg_virt(sglist), sglist->length); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 1133b5c..5c63b92 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -154,7 +154,10 @@ struct dino_device }; /* Looks nice and keeps the compiler happy */ -#define DINO_DEV(d) ((struct dino_device *) d) +#define DINO_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct dino_device *)__pdata; }) /* diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 2ec2aef..bc286cb 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -111,8 +111,10 @@ static u32 lba_t32; /* Looks nice and keeps the compiler happy */ -#define LBA_DEV(d) ((struct lba_device *) (d)) - +#define LBA_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct lba_device *)__pdata; }) /* ** Only allow 8 subsidiary busses per LBA diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 151b86b..56918d1 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) return 0; ioc = GET_IOC(dev); + if (!ioc) + return 0; /* * check if mask is >= than the current max IO Virt Address @@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int pide; ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; /* save offset bits */ offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; @@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } offset = iova & ~IOVP_MASK; iova ^= offset; /* clear offset bits */ size += offset; @@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, DBG_RUN_SG("%s() START %d entries\n", __func__, nents); ioc = GET_IOC(dev); + if (!ioc) + return 0; /* Fast path single entry scatterlists. 
*/ if (nents == 1) { @@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, __func__, nents, sg_virt(sglist), sglist->length); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } #ifdef SBA_COLLECT_STATS ioc->usg_calls++; -- cgit v0.10.2 From e3d2adaaf92760282888899d853535e08606cc98 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 29 May 2017 17:14:16 +0200 Subject: parisc/mm: Ensure IRQs are off in switch_mm() commit 649aa24254e85bf6bd7807dd372d083707852b1f upstream. This is because of commit f98db6013c55 ("sched/core: Add switch_mm_irqs_off() and use it in the scheduler") in which switch_mm_irqs_off() is called by the scheduler, vs switch_mm() which is used by use_mm(). This patch lets the parisc code mirror the x86 and powerpc code, ie. it disables interrupts in switch_mm(), and optimises the scheduler case by defining switch_mm_irqs_off(). Signed-off-by: Helge Deller Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 59be257..a812262 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context) mtctl(__space_to_prot(context), 8); } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) +static inline void switch_mm_irqs_off(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) { mtctl(__pa(next->pgd), 25); load_context(next->context); } } +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} +#define switch_mm_irqs_off switch_mm_irqs_off + #define deactivate_mm(tsk,mm) do { } while (0) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) -- cgit v0.10.2 From 68ea25f00ffec28c17e0576b027b2f9d6ef4ecca Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 25 May 2017 12:58:33 +0000 Subject: tools/lib/lockdep: Reduce MAX_LOCK_DEPTH to avoid overflowing lock_chain/: Depth commit 98dcea0cfd04e083ac74137ceb9a632604740e2d upstream. liblockdep has been broken since commit 75dd602a5198 ("lockdep: Fix lock_chain::base size"), as that adds a check that MAX_LOCK_DEPTH is within the range of lock_chain::depth and in liblockdep it is much too large. That should have resulted in a compiler error, but didn't because: - the check uses ARRAY_SIZE(), which isn't yet defined in liblockdep so is assumed to be an (undeclared) function - putting a function call inside a BUILD_BUG_ON() expression quietly turns it into some nonsense involving a variable-length array It did produce a compiler warning, but I didn't notice because liblockdep already produces too many warnings if -Wall is enabled (which I'll fix shortly). Even before that commit, which reduced lock_chain::depth from 8 bits to 6, MAX_LOCK_DEPTH was too large. 
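[ Illustrative sketch of the constraint, not the exact in-kernel check: since lock_chain::depth was reduced to a 6-bit field, the largest depth it can record is 63, which is what the kernel-side sanity check effectively asserts and why 63UL is the largest value liblockdep can use: ]

        BUILD_BUG_ON(MAX_LOCK_DEPTH > (1UL << 6) - 1);  /* depth must fit in 6 bits */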
Signed-off-by: Ben Hutchings Signed-off-by: Sasha Levin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: a.p.zijlstra@chello.nl Link: http://lkml.kernel.org/r/20170525130005.5947-3-alexander.levin@verizon.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h index c808c7d..d302142 100644 --- a/tools/lib/lockdep/uinclude/linux/lockdep.h +++ b/tools/lib/lockdep/uinclude/linux/lockdep.h @@ -8,7 +8,7 @@ #include #include -#define MAX_LOCK_DEPTH 2000UL +#define MAX_LOCK_DEPTH 63UL #define asmlinkage #define __visible -- cgit v0.10.2 From d2b64687b37f06c1302fb156d0b3aa9b347191ad Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 6 Jul 2017 15:35:28 -0700 Subject: thp, mm: fix crash due race in MADV_FREE handling commit bbf29ffc7f963bb894f84f0580c70cfea01c3892 upstream. Reinette reported the following crash: BUG: Bad page state in process log2exe pfn:57600 page:ffffea00015d8000 count:0 mapcount:0 mapping: (null) index:0x20200 flags: 0x4000000000040019(locked|uptodate|dirty|swapbacked) raw: 4000000000040019 0000000000000000 0000000000020200 00000000ffffffff raw: ffffea00015d8020 ffffea00015d8020 0000000000000000 0000000000000000 page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set bad because of flags: 0x1(locked) Modules linked in: rfcomm 8021q bnep intel_rapl x86_pkg_temp_thermal coretemp efivars btusb btrtl btbcm pwm_lpss_pci snd_hda_codec_hdmi btintel pwm_lpss snd_hda_codec_realtek snd_soc_skl snd_hda_codec_generic snd_soc_skl_ipc spi_pxa2xx_platform snd_soc_sst_ipc snd_soc_sst_dsp i2c_designware_platform i2c_designware_core snd_hda_ext_core snd_soc_sst_match snd_hda_intel snd_hda_codec mei_me snd_hda_core mei snd_soc_rt286 snd_soc_rl6347a snd_soc_core efivarfs CPU: 1 PID: 354 Comm: log2exe Not tainted 4.12.0-rc7-test-test #19 Hardware name: Intel corporation NUC6CAYS/NUC6CAYB, BIOS AYAPLCEL.86A.0027.2016.1108.1529 11/08/2016 Call Trace: bad_page+0x16a/0x1f0 free_pages_check_bad+0x117/0x190 free_hot_cold_page+0x7b1/0xad0 __put_page+0x70/0xa0 madvise_free_huge_pmd+0x627/0x7b0 madvise_free_pte_range+0x6f8/0x1150 __walk_page_range+0x6b5/0xe30 walk_page_range+0x13b/0x310 madvise_free_page_range.isra.16+0xad/0xd0 madvise_free_single_vma+0x2e4/0x470 SyS_madvise+0x8ce/0x1450 If somebody frees the page under us and we hold the last reference to it, put_page() would attempt to free the page before unlocking it. The fix is trivial reorder of operations. Dave said: "I came up with the exact same patch. For posterity, here's the test case, generated by syzkaller and trimmed down by Reinette: https://www.sr71.net/~dave/intel/log2.c And the config that helps detect this: https://www.sr71.net/~dave/intel/config-log2" Fixes: b8d3c4c3009d ("mm/huge_memory.c: don't split THP page when MADV_FREE syscall is called") Link: http://lkml.kernel.org/r/20170628101249.17879-1-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Reported-by: Reinette Chatre Acked-by: Dave Hansen Acked-by: Michal Hocko Acked-by: Minchan Kim Cc: Huang Ying Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e7d5db9..8258e9e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1373,8 +1373,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, get_page(page); spin_unlock(ptl); split_huge_page(page); - put_page(page); unlock_page(page); + put_page(page); goto out_unlocked; } -- cgit v0.10.2 From 8c6f19c250865c42669c005c51dc758644de1c6d Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Thu, 6 Jul 2017 15:35:31 -0700 Subject: kernel/extable.c: mark core_kernel_text notrace commit c0d80ddab89916273cb97114889d3f337bc370ae upstream. core_kernel_text is used by MIPS in its function graph trace processing, so having this method traced leads to an infinite set of recursive calls such as: Call Trace: ftrace_return_to_handler+0x50/0x128 core_kernel_text+0x10/0x1b8 prepare_ftrace_return+0x6c/0x114 ftrace_graph_caller+0x20/0x44 return_to_handler+0x10/0x30 return_to_handler+0x0/0x30 return_to_handler+0x0/0x30 ftrace_ops_no_ops+0x114/0x1bc core_kernel_text+0x10/0x1b8 core_kernel_text+0x10/0x1b8 core_kernel_text+0x10/0x1b8 ftrace_ops_no_ops+0x114/0x1bc core_kernel_text+0x10/0x1b8 prepare_ftrace_return+0x6c/0x114 ftrace_graph_caller+0x20/0x44 (...) Mark the function notrace to avoid it being traced. Link: http://lkml.kernel.org/r/1498028607-6765-1-git-send-email-marcin.nowakowski@imgtec.com Signed-off-by: Marcin Nowakowski Reviewed-by: Masami Hiramatsu Cc: Peter Zijlstra Cc: Thomas Meyer Cc: Ingo Molnar Cc: Steven Rostedt Cc: Daniel Borkmann Cc: Paul Gortmaker Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/extable.c b/kernel/extable.c index e820cce..4f06fc3 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr) return 0; } -int core_kernel_text(unsigned long addr) +int notrace core_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)_etext) -- cgit v0.10.2 From a48542e8b4ec9a7b3bf80edadb5aa229221826c4 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Mon, 10 Jul 2017 15:49:57 -0700 Subject: mm/list_lru.c: fix list_lru_count_node() to be race free commit 2c80cd57c74339889a8752b20862a16c28929c3a upstream. list_lru_count_node() iterates over all memcgs to get the total number of entries on the node but it can race with memcg_drain_all_list_lrus(), which migrates the entries from a dead cgroup to another. This can return incorrect number of entries from list_lru_count_node(). Fix this by keeping track of entries per node and simply return it in list_lru_count_node(). 
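[ Illustrative sketch, condensed from the hunk below: with a per-node counter maintained under nlru->lock at add/del/isolate time, the node count becomes a single plain read instead of a walk over every per-memcg list: ]

        unsigned long list_lru_count_node(struct list_lru *lru, int nid)
        {
                return lru->node[nid].nr_items;
        }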
Link: http://lkml.kernel.org/r/1498707555-30525-1-git-send-email-stummala@codeaurora.org Signed-off-by: Sahitya Tummala Acked-by: Vladimir Davydov Cc: Jan Kara Cc: Alexander Polakov Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index cb0ba9f..fa7fd03 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -44,6 +44,7 @@ struct list_lru_node { /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ struct list_lru_memcg *memcg_lrus; #endif + long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { diff --git a/mm/list_lru.c b/mm/list_lru.c index 234676e..7a40fa2 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; + nlru->nr_items++; spin_unlock(&nlru->lock); return true; } @@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; + nlru->nr_items--; spin_unlock(&nlru->lock); return true; } @@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { - long count = 0; - int memcg_idx; + struct list_lru_node *nlru; - count += __list_lru_count_one(lru, nid, -1); - if (list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) - count += __list_lru_count_one(lru, nid, memcg_idx); - } - return count; + nlru = &lru->node[nid]; + return nlru->nr_items; } EXPORT_SYMBOL_GPL(list_lru_count_node); @@ -226,6 +223,7 @@ restart: assert_spin_locked(&nlru->lock); case LRU_REMOVED: isolated++; + nlru->nr_items--; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to -- cgit v0.10.2 From a9aa6522a1a58e025ba139d9b55c3350ef868cb8 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Mon, 10 Jul 2017 15:50:00 -0700 Subject: fs/dcache.c: fix spin lockup issue on nlru->lock commit b17c070fb624cf10162cf92ea5e1ec25cd8ac176 upstream. __list_lru_walk_one() acquires nlru spin lock (nlru->lock) for longer duration if there are more number of items in the lru list. As per the current code, it can hold the spin lock for upto maximum UINT_MAX entries at a time. So if there are more number of items in the lru list, then "BUG: spinlock lockup suspected" is observed in the below path: spin_bug+0x90 do_raw_spin_lock+0xfc _raw_spin_lock+0x28 list_lru_add+0x28 dput+0x1c8 path_put+0x20 terminate_walk+0x3c path_lookupat+0x100 filename_lookup+0x6c user_path_at_empty+0x54 SyS_faccessat+0xd0 el0_svc_naked+0x24 This nlru->lock is acquired by another CPU in this path - d_lru_shrink_move+0x34 dentry_lru_isolate_shrink+0x48 __list_lru_walk_one.isra.10+0x94 list_lru_walk_node+0x40 shrink_dcache_sb+0x60 do_remount_sb+0xbc do_emergency_remount+0xb0 process_one_work+0x228 worker_thread+0x2e0 kthread+0xf4 ret_from_fork+0x10 Fix this lockup by reducing the number of entries to be shrinked from the lru list to 1024 at once. Also, add cond_resched() before processing the lru list again. 
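[ Illustrative sketch of the batching pattern, with isolate_cb and dispose_list as placeholder names for the per-cache callbacks: capping each walk bounds the nlru->lock hold time, and cond_resched() between batches lets contending CPUs make progress: ]

        do {
                freed = list_lru_walk(lru, isolate_cb, &dispose, 1024);
                dispose_list(&dispose);
                cond_resched();
        } while (list_lru_count(lru) > 0);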
Link: http://marc.info/?t=149722864900001&r=1&w=2 Link: http://lkml.kernel.org/r/1498707575-2472-1-git-send-email-stummala@codeaurora.org Signed-off-by: Sahitya Tummala Suggested-by: Jan Kara Suggested-by: Vladimir Davydov Acked-by: Vladimir Davydov Cc: Alexander Polakov Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/fs/dcache.c b/fs/dcache.c index 4485a48..1dbc6b5 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1133,11 +1133,12 @@ void shrink_dcache_sb(struct super_block *sb) LIST_HEAD(dispose); freed = list_lru_walk(&sb->s_dentry_lru, - dentry_lru_isolate_shrink, &dispose, UINT_MAX); + dentry_lru_isolate_shrink, &dispose, 1024); this_cpu_sub(nr_dentry_unused, freed); shrink_dentry_list(&dispose); - } while (freed > 0); + cond_resched(); + } while (list_lru_count(&sb->s_dentry_lru) > 0); } EXPORT_SYMBOL(shrink_dcache_sb); -- cgit v0.10.2 From 93eae954050d3b8490481aa8f7d20822f09719ca Mon Sep 17 00:00:00 2001 From: Cyril Bur Date: Mon, 10 Jul 2017 15:52:21 -0700 Subject: checkpatch: silence perl 5.26.0 unescaped left brace warnings commit 8d81ae05d0176da1c54aeaed697fa34be5c5575e upstream. As of perl 5, version 26, subversion 0 (v5.26.0) some new warnings have occurred when running checkpatch. Unescaped left brace in regex is deprecated here (and will be fatal in Perl 5.30), passed through in regex; marked by <-- HERE in m/^(.\s*){ <-- HERE \s*/ at scripts/checkpatch.pl line 3544. Unescaped left brace in regex is deprecated here (and will be fatal in Perl 5.30), passed through in regex; marked by <-- HERE in m/^(.\s*){ <-- HERE \s*/ at scripts/checkpatch.pl line 3885. Unescaped left brace in regex is deprecated here (and will be fatal in Perl 5.30), passed through in regex; marked by <-- HERE in m/^(\+.*(?:do|\))){ <-- HERE / at scripts/checkpatch.pl line 4374. It seems perfectly reasonable to do as the warning suggests and simply escape the left brace in these three locations. Link: http://lkml.kernel.org/r/20170607060135.17384-1-cyrilbur@gmail.com Signed-off-by: Cyril Bur Acked-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a8368d1..5517164 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3499,7 +3499,7 @@ sub process { $fixedline =~ s/\s*=\s*$/ = {/; fix_insert_line($fixlinenr, $fixedline); $fixedline = $line; - $fixedline =~ s/^(.\s*){\s*/$1/; + $fixedline =~ s/^(.\s*)\{\s*/$1/; fix_insert_line($fixlinenr, $fixedline); } } @@ -3840,7 +3840,7 @@ sub process { my $fixedline = rtrim($prevrawline) . " {"; fix_insert_line($fixlinenr, $fixedline); $fixedline = $rawline; - $fixedline =~ s/^(.\s*){\s*/$1\t/; + $fixedline =~ s/^(.\s*)\{\s*/$1\t/; if ($fixedline !~ /^\+\s*$/) { fix_insert_line($fixlinenr, $fixedline); } @@ -4329,7 +4329,7 @@ sub process { if (ERROR("SPACING", "space required before the open brace '{'\n" . $herecurr) && $fix) { - $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/; + $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/; } } -- cgit v0.10.2 From 63c2f8f8c41bf80af068f0b2aef4c0e2bdc32c4a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jul 2017 15:52:37 -0700 Subject: binfmt_elf: use ELF_ET_DYN_BASE only for PIE commit eab09532d40090698b05a07c1c87f39fdbc5fab5 upstream. The ELF_ET_DYN_BASE position was originally intended to keep loaders away from ET_EXEC binaries. 
(For example, running "/lib/ld-linux.so.2 /bin/cat" might cause the subsequent load of /bin/cat into where the loader had been loaded.) With the advent of PIE (ET_DYN binaries with an INTERP Program Header), ELF_ET_DYN_BASE continued to be used since the kernel was only looking at ET_DYN. However, since ELF_ET_DYN_BASE is traditionally set at the top 1/3rd of the TASK_SIZE, a substantial portion of the address space is unused. For 32-bit tasks when RLIMIT_STACK is set to RLIM_INFINITY, programs are loaded above the mmap region. This means they can be made to collide (CVE-2017-1000370) or nearly collide (CVE-2017-1000371) with pathological stack regions. Lowering ELF_ET_DYN_BASE solves both by moving programs below the mmap region in all cases, and will now additionally avoid programs falling back to the mmap region by enforcing MAP_FIXED for program loads (i.e. if it would have collided with the stack, now it will fail to load instead of falling back to the mmap region). To allow for a lower ELF_ET_DYN_BASE, loaders (ET_DYN without INTERP) are loaded into the mmap region, leaving space available for either an ET_EXEC binary with a fixed location or PIE being loaded into mmap by the loader. Only PIE programs are loaded offset from ELF_ET_DYN_BASE, which means architectures can now safely lower their values without risk of loaders colliding with their subsequently loaded programs. For 64-bit, ELF_ET_DYN_BASE is best set to 4GB to allow runtimes to use the entire 32-bit address space for 32-bit pointers. Thanks to PaX Team, Daniel Micay, and Rik van Riel for inspiration and suggestions on how to implement this solution. Fixes: d1fd836dcf00 ("mm: split ET_DYN ASLR from mmap ASLR") Link: http://lkml.kernel.org/r/20170621173201.GA114489@beast Signed-off-by: Kees Cook Acked-by: Rik van Riel Cc: Daniel Micay Cc: Qualys Security Advisory Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Alexander Viro Cc: Dmitry Safonov Cc: Andy Lutomirski Cc: Grzegorz Andrejczuk Cc: Masahiro Yamada Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Heiko Carstens Cc: James Hogan Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Pratyush Anand Cc: Russell King Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 94aad63..c152db2 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -245,12 +245,13 @@ extern int force_personality32; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index cfd724f..1fdf4e5 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -911,17 +911,60 @@ static int load_elf_binary(struct linux_binprm *bprm) elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; + /* + * If we are loading ET_EXEC or we have already performed + * the ET_DYN load_addr calculations, proceed normally. + */ if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (loc->elf_ex.e_type == ET_DYN) { - /* Try and get dynamic programs out of the way of the - * default mmap base, as well as whatever program they - * might try to exec. This is because the brk will - * follow the loader, and is not movable. */ - load_bias = ELF_ET_DYN_BASE - vaddr; - if (current->flags & PF_RANDOMIZE) - load_bias += arch_mmap_rnd(); - load_bias = ELF_PAGESTART(load_bias); + /* + * This logic is run once for the first LOAD Program + * Header for ET_DYN binaries to calculate the + * randomization (load_bias) for all the LOAD + * Program Headers, and to calculate the entire + * size of the ELF mapping (total_size). (Note that + * load_addr_set is set to true later once the + * initial mapping is performed.) + * + * There are effectively two types of ET_DYN + * binaries: programs (i.e. PIE: ET_DYN with INTERP) + * and loaders (ET_DYN without INTERP, since they + * _are_ the ELF interpreter). The loaders must + * be loaded away from programs since the program + * may otherwise collide with the loader (especially + * for ET_EXEC which does not have a randomized + * position). For example to handle invocations of + * "./ld.so someprog" to test out a new version of + * the loader, the subsequent program that the + * loader loads must avoid the loader itself, so + * they cannot share the same load range. Sufficient + * room for the brk must be allocated with the + * loader as well, since brk must be available with + * the loader. + * + * Therefore, programs are loaded offset from + * ELF_ET_DYN_BASE and loaders are loaded into the + * independently randomized mmap region (0 load_bias + * without MAP_FIXED). + */ + if (elf_interpreter) { + load_bias = ELF_ET_DYN_BASE; + if (current->flags & PF_RANDOMIZE) + load_bias += arch_mmap_rnd(); + elf_flags |= MAP_FIXED; + } else + load_bias = 0; + + /* + * Since load_bias is used for all subsequent loading + * calculations, we must lower it by the first vaddr + * so that the remaining calculations based on the + * ELF vaddrs will be correctly offset. The result + * is then page aligned. + */ + load_bias = ELF_PAGESTART(load_bias - vaddr); + total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum); if (!total_size) { -- cgit v0.10.2 From abb79a56898e044715d8efd07bc1902d27a94870 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jul 2017 15:52:40 -0700 Subject: arm: move ELF_ET_DYN_BASE to 4MB commit 6a9af90a3bcde217a1c053e135f5f43e5d5fafbd upstream. Now that explicitly executed loaders are loaded in the mmap region, we have more freedom to decide where we position PIE binaries in the address space to avoid possible collisions with mmap or stack regions. 4MB is chosen here mainly to have parity with x86, where this is the traditional minimum load location, likely to avoid historically requiring a 4MB page table entry when only a portion of the first 4MB would be used (since the NULL address is avoided). 
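[ Illustrative arithmetic, assuming PF_RANDOMIZE is set: with the loader/PIE split introduced earlier in this series, a PIE binary on such a base is placed at ELF_ET_DYN_BASE plus the architecture's mmap randomisation, page-aligned against the first LOAD segment's vaddr: ]

        load_bias = ELF_ET_DYN_BASE;
        if (current->flags & PF_RANDOMIZE)
                load_bias += arch_mmap_rnd();
        load_bias = ELF_PAGESTART(load_bias - vaddr);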
For ARM the position could be 0x8000, the standard ET_EXEC load address, but that is needlessly close to the NULL address, and anyone running PIE on 32-bit ARM will have an MMU, so the tight mapping is not needed. Link: http://lkml.kernel.org/r/1498154792-49952-2-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: James Hogan Cc: Pratyush Anand Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Alexander Viro Cc: Andy Lutomirski Cc: Daniel Micay Cc: Dmitry Safonov Cc: Grzegorz Andrejczuk Cc: Kees Cook Cc: Masahiro Yamada Cc: Qualys Security Advisory Cc: Rik van Riel Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ff..f13ae15 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* This is the base location for PIE (ET_DYN with INTERP) loads. */ +#define ELF_ET_DYN_BASE 0x400000UL /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we -- cgit v0.10.2 From 70779e0ebb37dadfdb31e53e534704600fa21f69 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jul 2017 15:52:44 -0700 Subject: arm64: move ELF_ET_DYN_BASE to 4GB / 4MB commit 02445990a96e60a67526510d8b00f7e3d14101c3 upstream. Now that explicitly executed loaders are loaded in the mmap region, we have more freedom to decide where we position PIE binaries in the address space to avoid possible collisions with mmap or stack regions. For 64-bit, align to 4GB to allow runtimes to use the entire 32-bit address space for 32-bit pointers. On 32-bit use 4MB, to match ARM. This could be 0x8000, the standard ET_EXEC load address, but that is needlessly close to the NULL address, and anyone running arm compat PIE will have an MMU, so the tight mapping is not needed. Link: http://lkml.kernel.org/r/1498251600-132458-4-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Mark Rutland Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index a55384f..afa23b0 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -113,12 +113,11 @@ #define ELF_EXEC_PAGESIZE PAGE_SIZE /* - * This is the location that an ET_DYN program is loaded if exec'ed. Typical - * use of this is to invoke "./ld.so someprog" to test out a new version of - * the loader. We need to make sure that it is out of the way of the program - * that it will "exec", and that there is sufficient room for the brk. + * This is the base location for PIE (ET_DYN with INTERP) loads. 
On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ @@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT -#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) +/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ +#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL /* AArch32 registers. */ #define COMPAT_ELF_NGREG 18 -- cgit v0.10.2 From 90fb0f7aefc944d9e69208ab85766b20472cb173 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jul 2017 15:52:47 -0700 Subject: powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB commit 47ebb09d54856500c5a5e14824781902b3bb738e upstream. Now that explicitly executed loaders are loaded in the mmap region, we have more freedom to decide where we position PIE binaries in the address space to avoid possible collisions with mmap or stack regions. For 64-bit, align to 4GB to allow runtimes to use the entire 32-bit address space for 32-bit pointers. On 32-bit use 4MB, which is the traditional x86 minimum load location, likely to avoid historically requiring a 4MB page table entry when only a portion of the first 4MB would be used (since the NULL address is avoided). Link: http://lkml.kernel.org/r/1498154792-49952-4-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Tested-by: Michael Ellerman Acked-by: Michael Ellerman Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: James Hogan Cc: Pratyush Anand Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index ee46ffe..743ad7a 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -23,12 +23,13 @@ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0x20000000 +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ + 0x100000000UL) #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) -- cgit v0.10.2 From fbc877cd08e55f794bdd0d0363bce5147629ef16 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jul 2017 15:52:51 -0700 Subject: s390: reduce ELF_ET_DYN_BASE commit a73dc5370e153ac63718d850bddf0c9aa9d871e6 upstream. Now that explicitly executed loaders are loaded in the mmap region, we have more freedom to decide where we position PIE binaries in the address space to avoid possible collisions with mmap or stack regions. For 64-bit, align to 4GB to allow runtimes to use the entire 32-bit address space for 32-bit pointers. On 32-bit use 4MB, which is the traditional x86 minimum load location, likely to avoid historically requiring a 4MB page table entry when only a portion of the first 4MB would be used (since the NULL address is avoided). 
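[ For reference, the two constants repeated across this series, written as standalone compile-time checks rather than kernel code: ]

        _Static_assert(0x000400000UL == 4UL << 20,
                       "32-bit base: 4 MiB, the traditional x86 minimum load address");
        _Static_assert(0x100000000ULL == 1ULL << 32,
                       "64-bit base: the first byte above the 32-bit address space");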
For s390 the position could be 0x10000, but that is needlessly close to the NULL address. Link: http://lkml.kernel.org/r/1498154792-49952-5-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: James Hogan Cc: Pratyush Anand Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1736c7d..8d665f1 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -158,14 +158,13 @@ extern unsigned int vdso_enabled; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. 64-bit - tasks are aligned to 4GB. */ -#define ELF_ET_DYN_BASE (is_compat_task() ? \ - (STACK_TOP / 3 * 2) : \ - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ -- cgit v0.10.2 From f31c4f65dd09319ba21cf825fa36daf0c1ddf958 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 7 Jul 2017 11:57:29 -0700 Subject: exec: Limit arg stack to at most 75% of _STK_LIM commit da029c11e6b12f321f36dac8771e833b65cec962 upstream. To avoid pathological stack usage or the need to special-case setuid execs, just limit all arg stack usage to at most 75% of _STK_LIM (6MB). Signed-off-by: Kees Cook Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/fs/exec.c b/fs/exec.c index 9144140..b8c43be 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -215,8 +215,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; - unsigned long ptr_size; - struct rlimit *rlim; + unsigned long ptr_size, limit; /* * Since the stack will hold pointers to the strings, we @@ -245,14 +244,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, return page; /* - * Limit to 1/4-th the stack size for the argv+env strings. + * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM + * (whichever is smaller) for the argv+env strings. * This ensures that: * - the remaining binfmt code will not run out of stack space, * - the program will have a reasonable amount of stack left * to work from. */ - rlim = current->signal->rlim; - if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + limit = _STK_LIM / 4 * 3; + limit = min(limit, rlimit(RLIMIT_STACK) / 4); + if (size > limit) goto fail; } -- cgit v0.10.2 From 445a945ad67bfbf4d1aed7f290b1465734c9a720 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 21 Jun 2017 22:45:08 +0100 Subject: ARM64: dts: marvell: armada37xx: Fix timer interrupt specifiers commit 88cda00733f0731711c76e535d4972c296ac512e upstream. 
Contrary to popular belief, PPIs connected to a GICv3 to not have an affinity field similar to that of GICv2. That is consistent with the fact that GICv3 is designed to accomodate thousands of CPUs, and fitting them as a bitmap in a byte is... difficult. Fixes: adbc3695d9e4 ("arm64: dts: add the Marvell Armada 3700 family and a development board") Signed-off-by: Marc Zyngier Signed-off-by: Gregory CLEMENT Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index e9bd587..49a5d8c 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -75,14 +75,10 @@ timer { compatible = "arm,armv8-timer"; - interrupts = , - , - , - ; + interrupts = , + , + , + ; }; soc { -- cgit v0.10.2 From 63c634cf95475ef86cdf15eaef5abeb5888689ab Mon Sep 17 00:00:00 2001 From: Adam Borowski Date: Sat, 3 Jun 2017 09:35:06 +0200 Subject: vt: fix unchecked __put_user() in tioclinux ioctls commit 6987dc8a70976561d22450b5858fc9767788cc1c upstream. Only read access is checked before this call. Actually, at the moment this is not an issue, as every in-tree arch does the same manual checks for VERIFY_READ vs VERIFY_WRITE, relying on the MMU to tell them apart, but this wasn't the case in the past and may happen again on some odd arch in the future. If anyone cares about 3.7 and earlier, this is a security hole (untested) on real 80386 CPUs. Signed-off-by: Adam Borowski Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8c3bf3d..ce2c3c6 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2711,13 +2711,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) * related to the kernel should not use this. */ data = vt_get_shift_state(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_GETMOUSEREPORTING: console_lock(); /* May be overkill */ data = mouse_reporting(); console_unlock(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETVESABLANK: console_lock(); @@ -2726,7 +2726,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) break; case TIOCL_GETKMSGREDIRECT: data = vt_get_kmsg_redirect(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: if (!capable(CAP_SYS_ADMIN)) { -- cgit v0.10.2 From ac5e9e801f47311bdbd1dc490a1b90df676b815c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 28 Apr 2017 20:11:09 -0700 Subject: rcu: Add memory barriers for NOCB leader wakeup commit 6b5fc3a1331810db407c9e0e673dc1837afdc9d0 upstream. Wait/wakeup operations do not guarantee ordering on their own. Instead, either locking or memory barriers are required. This commit therefore adds memory barriers to wake_nocb_leader() and nocb_leader_wait(). Signed-off-by: Paul E. McKenney Tested-by: Krister Johansen Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 56583e7..e3944c4 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1767,6 +1767,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { /* Prior smp_mb__after_atomic() orders against prior enqueue. */ WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); + smp_mb(); /* ->nocb_leader_sleep before swake_up(). */ swake_up(&rdp_leader->nocb_wq); } } @@ -2021,6 +2022,7 @@ wait_again: * nocb_gp_head, where they await a grace period. 
*/ gotcbs = false; + smp_mb(); /* wakeup before ->nocb_head reads. */ for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head); if (!rdp->nocb_gp_head) -- cgit v0.10.2 From 7d976da043459fe2a476b95c4da5f713c5d076fc Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 9 Jun 2017 10:59:07 +0100 Subject: nvmem: core: fix leaks on registration errors commit 3360acdf839170b612f5b212539694c20e3f16d0 upstream. Make sure to deregister and release the nvmem device and underlying memory on registration errors. Note that the private data must be freed using put_device() once the struct device has been initialised. Also note that there's a related reference leak in the deregistration function as reported by Mika Westerberg which is being fixed separately. Fixes: b6c217ab9be6 ("nvmem: Add backwards compatibility support for older EEPROM drivers.") Fixes: eace75cfdcf7 ("nvmem: Add a simple NVMEM framework for nvmem providers") Cc: Andrew Lunn Cc: Srinivas Kandagatla Cc: Mika Westerberg Signed-off-by: Johan Hovold Acked-by: Andrey Smirnov Signed-off-by: Srinivas Kandagatla Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 965911d..1b4d93e 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -488,21 +488,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) rval = device_add(&nvmem->dev); if (rval) - goto out; + goto err_put_device; if (config->compat) { rval = nvmem_setup_compat(nvmem, config); if (rval) - goto out; + goto err_device_del; } if (config->cells) nvmem_add_cells(nvmem, config); return nvmem; -out: - ida_simple_remove(&nvmem_ida, nvmem->id); - kfree(nvmem); + +err_device_del: + device_del(&nvmem->dev); +err_put_device: + put_device(&nvmem->dev); + return ERR_PTR(rval); } EXPORT_SYMBOL_GPL(nvmem_register); -- cgit v0.10.2 From e260db7576765e0d82991dbe4bab2ec058ecb67a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 15 May 2017 14:42:07 -0500 Subject: mnt: In umount propagation reparent in a separate pass commit 570487d3faf2a1d8a220e6ee10f472163123d7da upstream. It was observed that in some pathlogical cases that the current code does not unmount everything it should. After investigation it was determined that the issue is that mnt_change_mntpoint can can change which mounts are available to be unmounted during mount propagation which is wrong. 
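[ Illustrative sketch of the two-pass structure the patch introduces, condensed from the hunk below: reparenting is deferred onto a local list while the propagation tree is walked, and performed only after the walk completes, so mnt_change_mountpoint() can no longer move mounts out from under the traversal: ]

        LIST_HEAD(to_reparent);

        list_for_each_entry(mnt, list, mnt_list)
                __propagate_umount(mnt, &to_reparent);  /* collect, don't reparent */
        reparent_mounts(&to_reparent);                  /* mutate after the walk */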
The trivial reproducer is:

$ cat ./pathological.sh
mount -t tmpfs test-base /mnt
cd /mnt
mkdir 1 2 1/1
mount --bind 1 1
mount --make-shared 1
mount --bind 1 2
mount --bind 1/1 1/1
mount --bind 1/1 1/1
echo
grep test-base /proc/self/mountinfo
umount 1/1
echo
grep test-base /proc/self/mountinfo

$ unshare -Urm ./pathological.sh

The expected output looks like:

46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
49 54 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
50 53 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
51 49 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
54 47 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
53 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
52 50 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000

The output without the fix looks like:

46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
49 54 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
50 53 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
51 49 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
54 47 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
53 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
52 50 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
52 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000

That last mount in the output was in the propagation tree to be unmounted but was missed, because mnt_change_mountpoint changed its parent before the walk through the mount propagation tree observed it. Fixes: 1064f874abc0 ("mnt: Tuck mounts under others instead of creating shadow/side mounts.") Acked-by: Andrei Vagin Reviewed-by: Ram Pai Signed-off-by: "Eric W. Biederman"
Biederman" Signed-off-by: Greg Kroah-Hartman diff --git a/fs/mount.h b/fs/mount.h index d8295f2..3d204e1 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -58,6 +58,7 @@ struct mount { struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ + struct list_head mnt_reparent; /* reparent list entry */ #ifdef CONFIG_FSNOTIFY struct hlist_head mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; diff --git a/fs/namespace.c b/fs/namespace.c index 5e35057..677163c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name) INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); + INIT_LIST_HEAD(&mnt->mnt_reparent); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif diff --git a/fs/pnode.c b/fs/pnode.c index b394ca5..948e85a 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -441,7 +441,7 @@ static void mark_umount_candidates(struct mount *mnt) * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ -static void __propagate_umount(struct mount *mnt) +static void __propagate_umount(struct mount *mnt, struct list_head *to_reparent) { struct mount *parent = mnt->mnt_parent; struct mount *m; @@ -466,17 +466,38 @@ static void __propagate_umount(struct mount *mnt) */ topper = find_topper(child); if (topper) - mnt_change_mountpoint(child->mnt_parent, child->mnt_mp, - topper); + list_add_tail(&topper->mnt_reparent, to_reparent); - if (list_empty(&child->mnt_mounts)) { + if (topper || list_empty(&child->mnt_mounts)) { list_del_init(&child->mnt_child); + list_del_init(&child->mnt_reparent); child->mnt.mnt_flags |= MNT_UMOUNT; list_move_tail(&child->mnt_list, &mnt->mnt_list); } } } +static void reparent_mounts(struct list_head *to_reparent) +{ + while (!list_empty(to_reparent)) { + struct mount *mnt, *parent; + struct mountpoint *mp; + + mnt = list_first_entry(to_reparent, struct mount, mnt_reparent); + list_del_init(&mnt->mnt_reparent); + + /* Where should this mount be reparented to? */ + mp = mnt->mnt_mp; + parent = mnt->mnt_parent; + while (parent->mnt.mnt_flags & MNT_UMOUNT) { + mp = parent->mnt_mp; + parent = parent->mnt_parent; + } + + mnt_change_mountpoint(parent, mp, mnt); + } +} + /* * collect all mounts that receive propagation from the mount in @list, * and return these additional mounts in the same list. @@ -487,11 +508,15 @@ static void __propagate_umount(struct mount *mnt) int propagate_umount(struct list_head *list) { struct mount *mnt; + LIST_HEAD(to_reparent); list_for_each_entry_reverse(mnt, list, mnt_list) mark_umount_candidates(mnt); list_for_each_entry(mnt, list, mnt_list) - __propagate_umount(mnt); + __propagate_umount(mnt, &to_reparent); + + reparent_mounts(&to_reparent); + return 0; } -- cgit v0.10.2 From bb4fbf094b440a9209ed88c8681960d4b26eec0f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 24 Oct 2016 16:16:13 -0500 Subject: mnt: In propgate_umount handle visiting mounts in any order commit 99b19d16471e9c3faa85cad38abc9cbbe04c6d55 upstream. While investigating some poor umount performance I realized that in the case of overlapping mount trees where some of the mounts are locked the code has been failing to unmount all of the mounts it should have been unmounting. 
This failure to unmount all of the necessary mounts can be reproduced with:

$ cat locked_mounts_test.sh
mount -t tmpfs test-base /mnt
mount --make-shared /mnt
mkdir -p /mnt/b

mount -t tmpfs test1 /mnt/b
mount --make-shared /mnt/b
mkdir -p /mnt/b/10

mount -t tmpfs test2 /mnt/b/10
mount --make-shared /mnt/b/10
mkdir -p /mnt/b/10/20

mount --rbind /mnt/b /mnt/b/10/20

unshare -Urm --propagation unchanged /bin/sh -c 'sleep 5; if [ $(grep test /proc/self/mountinfo | wc -l) -eq 1 ] ; then echo SUCCESS ; else echo FAILURE ; fi'
sleep 1
umount -l /mnt/b
wait %%

$ unshare -Urm ./locked_mounts_test.sh

This failure is corrected by removing the prepass that marks mounts that may be umounted. A first pass is added that umounts mounts if possible and, if not, sets the mount mark if they could be unmounted were they not locked, and adds them to a list of umount possibilities. This first pass reconsiders the mount's parent if it is on the list of umount possibilities, ensuring that information about unmountability will pass from child to parent. A second pass then walks through all mounts that are umounted and processes their children, unmounting them or marking them for reparenting. A last pass cleans up the state on the mounts that could not be umounted and, if applicable, reparents them to their first parent that remained mounted. While a bit longer than the old code, this code is much more robust as it allows information to flow up from the leaves and down from the trunk, making the order in which mounts are encountered in the umount propagation tree irrelevant. Fixes: 0c56fe31420c ("mnt: Don't propagate unmounts to locked mounts") Reviewed-by: Andrei Vagin Signed-off-by: "Eric W. Biederman" Signed-off-by: Greg Kroah-Hartman diff --git a/fs/mount.h b/fs/mount.h index 3d204e1..3603884 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -58,7 +58,7 @@ struct mount { struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ - struct list_head mnt_reparent; /* reparent list entry */ + struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct hlist_head mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; diff --git a/fs/namespace.c b/fs/namespace.c index 677163c..d7360f9 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -237,7 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name) INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); - INIT_LIST_HEAD(&mnt->mnt_reparent); + INIT_LIST_HEAD(&mnt->mnt_umounting); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif diff --git a/fs/pnode.c b/fs/pnode.c index 948e85a..acc2eef 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -415,86 +415,95 @@ void propagate_mount_unlock(struct mount *mnt) } } -/* - * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */ -static void mark_umount_candidates(struct mount *mnt) +static void umount_one(struct mount *mnt, struct list_head *to_umount) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; - - BUG_ON(parent == mnt); - - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - if (!child || (child->mnt.mnt_flags & MNT_UMOUNT)) - continue; - if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) { - SET_MNT_MARK(child); - } - } + CLEAR_MNT_MARK(mnt); + mnt->mnt.mnt_flags |= MNT_UMOUNT; + list_del_init(&mnt->mnt_child); + list_del_init(&mnt->mnt_umounting); + list_move_tail(&mnt->mnt_list, to_umount); } /* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ -static void __propagate_umount(struct mount *mnt, struct list_head *to_reparent) +static bool __propagate_umount(struct mount *mnt, + struct list_head *to_umount, + struct list_head *to_restore) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; + bool progress = false; + struct mount *child; - BUG_ON(parent == mnt); + /* + * The state of the parent won't change if this mount is + * already unmounted or marked as without children. + */ + if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) + goto out; - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *topper; - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - /* - * umount the child only if the child has no children - * and the child is marked safe to unmount. - */ - if (!child || !IS_MNT_MARKED(child)) + /* Verify topper is the only grandchild that has not been + * speculatively unmounted. + */ + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + if (child->mnt_mountpoint == mnt->mnt.mnt_root) continue; - CLEAR_MNT_MARK(child); + if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) + continue; + /* Found a mounted child */ + goto children; + } - /* If there is exactly one mount covering all of child - * replace child with that mount. - */ - topper = find_topper(child); - if (topper) - list_add_tail(&topper->mnt_reparent, to_reparent); + /* Mark mounts that can be unmounted if not locked */ + SET_MNT_MARK(mnt); + progress = true; - if (topper || list_empty(&child->mnt_mounts)) { - list_del_init(&child->mnt_child); - list_del_init(&child->mnt_reparent); - child->mnt.mnt_flags |= MNT_UMOUNT; - list_move_tail(&child->mnt_list, &mnt->mnt_list); + /* If a mount is without children and not locked umount it. */ + if (!IS_MNT_LOCKED(mnt)) { + umount_one(mnt, to_umount); + } else { +children: + list_move_tail(&mnt->mnt_umounting, to_restore); + } +out: + return progress; +} + +static void umount_list(struct list_head *to_umount, + struct list_head *to_restore) +{ + struct mount *mnt, *child, *tmp; + list_for_each_entry(mnt, to_umount, mnt_list) { + list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { + /* topper? 
*/ + if (child->mnt_mountpoint == mnt->mnt.mnt_root) + list_move_tail(&child->mnt_umounting, to_restore); + else + umount_one(child, to_umount); } } } -static void reparent_mounts(struct list_head *to_reparent) +static void restore_mounts(struct list_head *to_restore) { - while (!list_empty(to_reparent)) { + /* Restore mounts to a clean working state */ + while (!list_empty(to_restore)) { struct mount *mnt, *parent; struct mountpoint *mp; - mnt = list_first_entry(to_reparent, struct mount, mnt_reparent); - list_del_init(&mnt->mnt_reparent); + mnt = list_first_entry(to_restore, struct mount, mnt_umounting); + CLEAR_MNT_MARK(mnt); + list_del_init(&mnt->mnt_umounting); - /* Where should this mount be reparented to? */ + /* Should this mount be reparented? */ mp = mnt->mnt_mp; parent = mnt->mnt_parent; while (parent->mnt.mnt_flags & MNT_UMOUNT) { mp = parent->mnt_mp; parent = parent->mnt_parent; } - - mnt_change_mountpoint(parent, mp, mnt); + if (parent != mnt->mnt_parent) + mnt_change_mountpoint(parent, mp, mnt); } } @@ -508,15 +517,34 @@ static void reparent_mounts(struct list_head *to_reparent) int propagate_umount(struct list_head *list) { struct mount *mnt; - LIST_HEAD(to_reparent); - - list_for_each_entry_reverse(mnt, list, mnt_list) - mark_umount_candidates(mnt); - - list_for_each_entry(mnt, list, mnt_list) - __propagate_umount(mnt, &to_reparent); + LIST_HEAD(to_restore); + LIST_HEAD(to_umount); + + list_for_each_entry(mnt, list, mnt_list) { + struct mount *parent = mnt->mnt_parent; + struct mount *m; + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { + struct mount *child = __lookup_mnt(&m->mnt, + mnt->mnt_mountpoint); + if (!child) + continue; + + /* Check the child and parents while progress is made */ + while (__propagate_umount(child, + &to_umount, &to_restore)) { + /* Is the parent a umount candidate? */ + child = child->mnt_parent; + if (list_empty(&child->mnt_umounting)) + break; + } + } + } - reparent_mounts(&to_reparent); + umount_list(&to_umount, &to_restore); + restore_mounts(&to_restore); + list_splice_tail(&to_umount, list); return 0; } -- cgit v0.10.2 From 54fcb2303ef40bc9476fc698ad292c569e5da4fb Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 24 Oct 2016 17:25:19 -0500 Subject: mnt: Make propagate_umount less slow for overlapping mount propagation trees commit 296990deb389c7da21c78030376ba244dc1badf5 upstream. Andrei Vagin pointed out that the time to execute propagate_umount can go non-linear (and take a ludicrous amount of time) when the mount propagation trees of the mounts to be unmounted by a lazy unmount overlap. Make the walk of the mount propagation trees nearly linear by remembering which mounts have already been visited, allowing subsequent walks to detect when walking a mount propagation tree or a subtree of a mount propagation tree would be duplicate work and to skip them entirely. Walk the list of mounts whose propagation trees need to be traversed from the mount highest in the mount tree to mounts lower in the mount tree, so that odds are higher that the code will walk the largest trees first, allowing later tree walks to be skipped entirely. Add cleanup_umount_visitations to remove the code's memory of which mounts have been visited. Add the functions last_slave and skip_propagation_subtree to allow skipping appropriate parts of the mount propagation tree without needing to change the logic of the rest of the code.
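The "visited" bookkeeping added here leans on a common kernel idiom: an otherwise-idle list_head doubles as a flag, where empty means not yet seen. A condensed sketch, using names from the patch rather than the verbatim code:

if (!list_empty(&mnt->mnt_umounting))
	continue;				/* already visited: skip */
list_add_tail(&mnt->mnt_umounting, &visited);	/* mark as visited */

/* ... walk the propagation tree ... */

cleanup_umount_visitations(&visited);		/* finally, clear every mark */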
A script to generate overlapping mount propagation trees:

$ cat run.sh
set -e
mount -t tmpfs zdtm /mnt
mkdir -p /mnt/1 /mnt/2
mount -t tmpfs zdtm /mnt/1
mount --make-shared /mnt/1
mkdir /mnt/1/1

iteration=10
if [ -n "$1" ] ; then
	iteration=$1
fi

for i in $(seq $iteration); do
	mount --bind /mnt/1/1 /mnt/1/1
done

mount --rbind /mnt/1 /mnt/2

TIMEFORMAT='%Rs'
nr=$(( ( 2 ** ( $iteration + 1 ) ) + 1 ))
echo -n "umount -l /mnt/1 -> $nr "
time umount -l /mnt/1

nr=$(cat /proc/self/mountinfo | grep zdtm | wc -l )
time umount -l /mnt/2

$ for i in $(seq 9 19); do echo $i; unshare -Urm bash ./run.sh $i; done

Here are the performance numbers with and without the patch:

  mhash |  8192   |  8192  | 1048576 | 1048576
 mounts | before  | after  |  before | after
------------------------------------------------
   1025 |  0.040s | 0.016s |  0.038s | 0.019s
   2049 |  0.094s | 0.017s |  0.080s | 0.018s
   4097 |  0.243s | 0.019s |  0.206s | 0.023s
   8193 |  1.202s | 0.028s |  1.562s | 0.032s
  16385 |  9.635s | 0.036s |  9.952s | 0.041s
  32769 | 60.928s | 0.063s | 44.321s | 0.064s
  65537 |         | 0.097s |         | 0.097s
 131073 |         | 0.233s |         | 0.176s
 262145 |         | 0.653s |         | 0.344s
 524289 |         | 2.305s |         | 0.735s
1048577 |         | 7.107s |         | 2.603s

Andrei Vagin reports fixing the performance problem is part of the work to fix CVE-2016-6213. Fixes: a05964f3917c ("[PATCH] shared mounts handling: umount") Reported-by: Andrei Vagin Reviewed-by: Andrei Vagin Signed-off-by: "Eric W. Biederman" Signed-off-by: Greg Kroah-Hartman diff --git a/fs/pnode.c b/fs/pnode.c index acc2eef..d15c63e 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p) return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); } +static inline struct mount *last_slave(struct mount *p) +{ + return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave); +} + static inline struct mount *next_slave(struct mount *p) { return list_entry(p->mnt_slave.next, struct mount, mnt_slave); @@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m, } } +static struct mount *skip_propagation_subtree(struct mount *m, + struct mount *origin) +{ + /* + * Advance m such that propagation_next will not return + * the slaves of m. + */ + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) + m = last_slave(m); + + return m; +} + static struct mount *next_group(struct mount *m, struct mount *origin) { while (1) { @@ -507,6 +525,15 @@ static void restore_mounts(struct list_head *to_restore) } } +static void cleanup_umount_visitations(struct list_head *visited) +{ + while (!list_empty(visited)) { + struct mount *mnt = + list_first_entry(visited, struct mount, mnt_umounting); + list_del_init(&mnt->mnt_umounting); + } +} + /* * collect all mounts that receive propagation from the mount in @list, * and return these additional mounts in the same list. @@ -519,11 +546,23 @@ int propagate_umount(struct list_head *list) struct mount *mnt; LIST_HEAD(to_restore); LIST_HEAD(to_umount); + LIST_HEAD(visited); - list_for_each_entry(mnt, list, mnt_list) { + /* Find candidates for unmounting */ + list_for_each_entry_reverse(mnt, list, mnt_list) { struct mount *parent = mnt->mnt_parent; struct mount *m; + /* + * If this mount has already been visited it is known that its + * entire peer group and all of their slaves in the propagation + * tree for the mountpoint have already been visited and there is + * no need to visit them again.
+ */ + if (!list_empty(&mnt->mnt_umounting)) + continue; + + list_add_tail(&mnt->mnt_umounting, &visited); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { struct mount *child = __lookup_mnt(&m->mnt, @@ -531,6 +570,27 @@ int propagate_umount(struct list_head *list) if (!child) continue; + if (!list_empty(&child->mnt_umounting)) { + /* + * If the child has already been visited it is + * known that its entire peer group and all of + * their slaves in the propagation tree for the + * mountpoint have already been visited and there + * is no need to visit this subtree again. + */ + m = skip_propagation_subtree(m, parent); + continue; + } else if (child->mnt.mnt_flags & MNT_UMOUNT) { + /* + * We have come across a partially unmounted + * mount in the list that has not been visited yet. + * Remember it has been visited and continue + * about our merry way. + */ + list_add_tail(&child->mnt_umounting, &visited); + continue; + } + /* Check the child and parents while progress is made */ while (__propagate_umount(child, &to_umount, &to_restore)) { @@ -544,6 +604,7 @@ int propagate_umount(struct list_head *list) umount_list(&to_umount, &to_restore); restore_mounts(&to_restore); + cleanup_umount_visitations(&visited); list_splice_tail(&to_umount, list); return 0; -- cgit v0.10.2 From 1e6f1af808c11a011b9c15e7e2e92f01885855b6 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 29 Jun 2017 08:46:12 -0700 Subject: selftests/capabilities: Fix the test_execve test commit 796a3bae2fba6810427efdb314a1c126c9490fb3 upstream. test_execve does rather odd mount manipulations to safely create temporary setuid and setgid executables that aren't visible to the rest of the system. Those executables end up in the test's cwd, but that cwd is MNT_DETACHed. The core namespace code considers MNT_DETACHed trees to belong to no mount namespace at all and, in general, MNT_DETACHed trees are only barely functional. This interacted with commit 380cf5ba6b0a ("fs: Treat foreign mounts as nosuid") to cause all MNT_DETACHed trees to act as though they're nosuid, breaking the test. Fix it by just not detaching the tree. It's still in a private mount namespace and is therefore still invisible to the rest of the system (except via /proc, and the same nosuid logic will protect all other programs on the system from believing in test_execve's setuid bits). While we're at it, fix some blatant whitespace problems. Reported-by: Naresh Kamboju Fixes: 380cf5ba6b0a ("fs: Treat foreign mounts as nosuid") Cc: "Eric W. Biederman"
Biederman" Cc: Kees Cook Cc: Shuah Khan Cc: Greg KH Cc: linux-kselftest@vger.kernel.org Signed-off-by: Andy Lutomirski Acked-by: Greg Kroah-Hartman Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c index 10a21a9..763f37f 100644 --- a/tools/testing/selftests/capabilities/test_execve.c +++ b/tools/testing/selftests/capabilities/test_execve.c @@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void) if (chdir(cwd) != 0) err(1, "chdir to private tmpfs"); - - if (umount2(".", MNT_DETACH) != 0) - err(1, "detach private tmpfs"); } static void copy_fromat_to(int fromfd, const char *fromname, const char *toname) @@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path) err(1, "chown"); if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0) err(1, "chmod"); -} + } capng_get_caps_process(); @@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path) } else { printf("[RUN]\tNon-root +ia, sgidnonroot => i\n"); exec_other_validate_cap("./validate_cap_sgidnonroot", - false, false, true, false); + false, false, true, false); if (fork_wait()) { printf("[RUN]\tNon-root +ia, sgidroot => i\n"); -- cgit v0.10.2 From 38dfd2e3a67367c701db106a1b600212efc4f93a Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 14 Jul 2017 14:49:38 -0700 Subject: mm: fix overflow check in expand_upwards() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 37511fb5c91db93d8bd6e3f52f86e5a7ff7cfcdf upstream. Jörn Engel noticed that the expand_upwards() function might not return -ENOMEM in case the requested address is (unsigned long)-PAGE_SIZE and if the architecture didn't defined TASK_SIZE as multiple of PAGE_SIZE. Affected architectures are arm, frv, m68k, blackfin, h8300 and xtensa which all define TASK_SIZE as 0xffffffff, but since none of those have an upwards-growing stack we currently have no actual issue. Nevertheless let's fix this just in case any of the architectures with an upward-growing stack (currently parisc, metag and partly ia64) define TASK_SIZE similar. Link: http://lkml.kernel.org/r/20170702192452.GA11868@p100.box Fixes: bd726c90b6b8 ("Allow stack to grow up to address space limit") Signed-off-by: Helge Deller Reported-by: Jörn Engel Cc: Hugh Dickins Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/mm/mmap.c b/mm/mmap.c index 145d3d5..75d263b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2228,7 +2228,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; - if (address >= TASK_SIZE) + if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; -- cgit v0.10.2 From 2ff2cc768eb75b82b5df60d06e8316d4ea62c111 Mon Sep 17 00:00:00 2001 From: Martin Hicks Date: Tue, 2 May 2017 09:38:35 -0400 Subject: crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 03d2c5114c95797c0aa7d9f463348b171a274fd4 upstream. An updated patch that also handles the additional key length requirements for the AEAD algorithms. The max keysize is not 96. For SHA384/512 it's 128, and for the AEAD algorithms it's longer still. Extend the max keysize for the AEAD size for AES256 + HMAC(SHA512). 
Fixes: 357fb60502ede ("crypto: talitos - add sha224, sha384 and sha512 to existing AEAD algorithms") Signed-off-by: Martin Hicks Acked-by: Horia Geantă Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 0418a2f..571de2f 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev) * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP */ #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) -#define TALITOS_MAX_KEY_SIZE 96 +#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ struct talitos_ctx { @@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + if (keylen > TALITOS_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; -- cgit v0.10.2 From 81cc2ef2677b32252d23b50e6cef7c994bf6ec03 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Wed, 28 Jun 2017 10:22:03 +0300 Subject: crypto: atmel - only treat EBUSY as transient if backlog commit 1606043f214f912a52195293614935811a6e3e53 upstream. The Atmel SHA driver was treating -EBUSY as an indication of queueing to the backlog without checking that the backlog is enabled for the request. Fix it by checking the request flags. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 97e3479..6fcf25f 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1000,7 +1000,9 @@ static int atmel_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = atmel_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* -- cgit v0.10.2 From db923288f37f42fae8afe58d448ed77e43cf56b1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 4 Jul 2017 12:21:12 +0800 Subject: crypto: sha1-ssse3 - Disable avx2 commit b82ce24426a4071da9529d726057e4e642948667 upstream. It has been reported that sha1-avx2 can cause page faults by reading beyond the end of the input. This patch disables it until it can be fixed. Fixes: 7c1da8d0d046 ("crypto: sha - SHA1 transform x86_64 AVX2") Reported-by: Jan Stancek Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index fc61739..f960a04 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, static bool avx2_usable(void) { - if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) + if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI1) && boot_cpu_has(X86_FEATURE_BMI2)) return true; -- cgit v0.10.2 From 48a9dff213986c62fe5405f78ad5433c764dcd3e Mon Sep 17 00:00:00 2001 From: David Gstir Date: Wed, 28 Jun 2017 15:27:10 +0200 Subject: crypto: caam - properly set IV after {en,de}crypt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 854b06f768794cd664886ec3ba3a5b1c58d42167 upstream.
Certain cipher modes like CTS expect the IV (req->info) of ablkcipher_request (or equivalently req->iv of skcipher_request) to contain the last ciphertext block when the {en,de}crypt operation is done. This is currently not the case for the CAAM driver, which in turn breaks e.g. cts(cbc(aes)) when the CAAM driver is enabled. This patch fixes the CAAM driver to properly set the IV after the {en,de}crypt operation of ablkcipher finishes. This issue was revealed by the changes in the SW CTS mode in commit 0605c41cc53ca ("crypto: cts - Convert to skcipher") Signed-off-by: David Gstir Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3bda6e5..0d743c6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -2014,10 +2014,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -2037,6 +2037,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. This is used e.g. by the CTS mode. + */ + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); @@ -2047,10 +2055,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -2069,6 +2077,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); -- cgit v0.10.2 From 80495c708490eccbd5a1e8e934a674c93df1b9d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Fri, 7 Jul 2017 16:57:06 +0300 Subject: crypto: caam - fix signals handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7459e1d25ffefa2b1be799477fcc1f6c62f6cec7 upstream. The driver does not properly handle the case when signals interrupt wait_for_completion_interruptible():
 - it does not check the return value
 - the completion structure is allocated on the stack; if a signal interrupts the sleep, it goes out of scope, causing the worker thread (caam_jr_dequeue) to fail when it accesses it
wait_for_completion_interruptible() is replaced with the uninterruptible wait_for_completion(). We choose to block all signals while waiting for I/O (the device executing the split key generation job descriptor) since the alternative - in order to have a deterministic device state - would be to flush the job ring (aborting *all* in-progress jobs).
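A condensed sketch of the bug pattern being fixed (illustrative only, not the driver code verbatim):

struct split_key_result result;	/* lives on this stack frame */

init_completion(&result.completion);
/* ... enqueue a job whose split_key_done callback will do
 * complete(&result.completion) and fill in result.err ... */
if (wait_for_completion_interruptible(&result.completion))
	return -ERESTARTSYS;	/* a signal lands us here: this frame dies,
				 * but the hardware callback will still write
				 * into 'result' later */

Switching to wait_for_completion() removes the early-return path, so the stack object is guaranteed to outlive the job.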
Fixes: 045e36780f115 ("crypto: caam - ahash hmac support") Fixes: 4c1ec1f930154 ("crypto: caam - refactor key_gen, sg") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 2474f14..631337c 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -491,7 +491,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index e1eaf4f..3ce1d5c 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", -- cgit v0.10.2 From a0a93e3e6e1e8c35d1d6cf624f2f74a976ab43d5 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 19 Jul 2017 09:58:49 +0200 Subject: Revert "sched/core: Optimize SCHED_SMT" This reverts commit 1b568f0aabf280555125bc7cefc08321ff0ebaba. For the 4.9 kernel tree, this patch causes scheduler regressions. It is fixed in newer kernels with a large number of individual patches, the sum of which is too big for the stable kernel tree. Ingo recommended just reverting the single patch for this tree, as it's much simpler. Reported-by: Ben Guthro Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Mike Galbraith Cc: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 692c948..737381d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7422,22 +7422,6 @@ int sched_cpu_dying(unsigned int cpu) } #endif -#ifdef CONFIG_SCHED_SMT -DEFINE_STATIC_KEY_FALSE(sched_smt_present); - -static void sched_init_smt(void) -{ - /* - * We've enumerated all CPUs and will assume that if any CPU - * has SMT siblings, CPU0 will too. - */ - if (cpumask_weight(cpu_smt_mask(0)) > 1) - static_branch_enable(&sched_smt_present); -} -#else -static inline void sched_init_smt(void) { } -#endif - void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; @@ -7467,9 +7451,6 @@ void __init sched_init_smp(void) init_sched_rt_class(); init_sched_dl_class(); - - sched_init_smt(); - sched_smp_initialized = true; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c242944..15fdae7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5376,7 +5376,7 @@ static inline bool test_idle_cores(int cpu, bool def) * Since SMT siblings share all cache levels, inspecting this limited remote * state should be fairly cheap. 
*/ -void __update_idle_core(struct rq *rq) +void update_idle_core(struct rq *rq) { int core = cpu_of(rq); int cpu; @@ -5408,9 +5408,6 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); int core, cpu, wrap; - if (!static_branch_likely(&sched_smt_present)) - return -1; - if (!test_idle_cores(target, false)) return -1; @@ -5444,9 +5441,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t { int cpu; - if (!static_branch_likely(&sched_smt_present)) - return -1; - for_each_cpu(cpu, cpu_smt_mask(target)) { if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) continue; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 055f935..ad77d66 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -43,6 +43,12 @@ extern void cpu_load_update_active(struct rq *this_rq); static inline void cpu_load_update_active(struct rq *this_rq) { } #endif +#ifdef CONFIG_SCHED_SMT +extern void update_idle_core(struct rq *rq); +#else +static inline void update_idle_core(struct rq *rq) { } +#endif + /* * Helpers for converting nanosecond timing to jiffy resolution */ @@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq) #endif } - -#ifdef CONFIG_SCHED_SMT - -extern struct static_key_false sched_smt_present; - -extern void __update_idle_core(struct rq *rq); - -static inline void update_idle_core(struct rq *rq) -{ - if (static_branch_unlikely(&sched_smt_present)) - __update_idle_core(rq); -} - -#else -static inline void update_idle_core(struct rq *rq) { } -#endif - DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) -- cgit v0.10.2 From 542ebc96c2004c665a4a6e3d2f2813685f0de7a3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 14 Apr 2017 14:20:05 +0200 Subject: sched/fair, cpumask: Export for_each_cpu_wrap() commit c6508a39640b9a27fc2bc10cb708152672c82045 upstream. commit c743f0a5c50f2fcbc628526279cfa24f3dabe182 upstream. More users for for_each_cpu_wrap() have appeared. Promote the construct to the generic cpumask interface. The implementation is slightly modified to reduce arguments. Signed-off-by: Peter Zijlstra (Intel) Cc: Lauro Ramos Venancio Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Cc: lwang@redhat.com Link: http://lkml.kernel.org/r/20170414122005.o35me2h5nowqkxbv@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Mel Gorman Signed-off-by: Greg Kroah-Hartman diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index fa3b155..2d65bbd 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); + +/** + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * @start: the start location + * + * The implementation does not assume any bit in @mask is set (including @start). + * + * After the loop, cpu is >= nr_cpu_ids.
+ */ +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ + (cpu) < nr_cpumask_bits; \ + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) + /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 15fdae7..7a68c63 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5310,43 +5310,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; } -/* - * Implement a for_each_cpu() variant that starts the scan at a given cpu - * (@start), and wraps around. - * - * This is used to scan for idle CPUs; such that not all CPUs looking for an - * idle CPU find the same CPU. The down-side is that tasks tend to cycle - * through the LLC domain. - * - * Especially tbench is found sensitive to this. - */ - -static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped) -{ - int next; - -again: - next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1); - - if (*wrapped) { - if (next >= start) - return nr_cpumask_bits; - } else { - if (next >= nr_cpumask_bits) { - *wrapped = 1; - n = -1; - goto again; - } - } - - return next; -} - -#define for_each_cpu_wrap(cpu, mask, start, wrap) \ - for ((wrap) = 0, (cpu) = (start)-1; \ - (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \ - (cpu) < nr_cpumask_bits; ) - #ifdef CONFIG_SCHED_SMT static inline void set_idle_cores(int cpu, int val) @@ -5406,14 +5369,14 @@ unlock: static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); - int core, cpu, wrap; + int core, cpu; if (!test_idle_cores(target, false)) return -1; cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p)); - for_each_cpu_wrap(core, cpus, target, wrap) { + for_each_cpu_wrap(core, cpus, target) { bool idle = true; for_each_cpu(cpu, cpu_smt_mask(core)) { @@ -5476,7 +5439,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t u64 avg_cost, avg_idle = this_rq()->avg_idle; u64 time, cost; s64 delta; - int cpu, wrap; + int cpu; this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); if (!this_sd) @@ -5493,7 +5456,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = local_clock(); - for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) { + for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) continue; if (idle_cpu(cpu)) diff --git a/lib/cpumask.c b/lib/cpumask.c index 81dedaa..4731a08 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } EXPORT_SYMBOL(cpumask_any_but); +/** + * cpumask_next_wrap - helper to implement for_each_cpu_wrap + * @n: the cpu prior to the place to search + * @mask: the cpumask pointer + * @start: the start point of the iteration + * @wrap: assume @n crossing @start terminates the iteration + * + * Returns >= nr_cpu_ids on completion + * + * Note: the @wrap argument is required for the start condition when + * we cannot assume @start is set in @mask. 
+ */ +int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) +{ + int next; + +again: + next = cpumask_next(n, mask); + + if (wrap && n < start && next >= start) { + return nr_cpumask_bits; + + } else if (next >= nr_cpumask_bits) { + wrap = true; + n = -1; + goto again; + } + + return next; +} +EXPORT_SYMBOL(cpumask_next_wrap); + /* These are not inline because of header tangles. */ #ifdef CONFIG_CPUMASK_OFFSTACK /** -- cgit v0.10.2 From 7c3f08eadcfdfca74b2563e941e645b52cc34622 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 14 Apr 2017 17:24:02 +0200 Subject: sched/topology: Fix building of overlapping sched-groups commit 0372dd2736e02672ac6e189c31f7d8c02ad543cd upstream. When building the overlapping groups, we very obviously should start with the previous domain of _this_ @cpu, not CPU-0. This can be readily demonstrated with a topology like:

  node   0   1   2   3
    0:  10  20  30  20
    1:  20  10  20  30
    2:  30  20  10  20
    3:  20  30  20  10

Where (for example) CPU1 ends up generating the following nonsensical groups:

[] CPU1 attaching sched-domain:
[] domain 0: span 0-2 level NUMA
[] groups: 1 2 0
[] domain 1: span 0-3 level NUMA
[] groups: 1-3 (cpu_capacity = 3072) 0-1,3 (cpu_capacity = 3072)

Where the fact that domain 1 doesn't include a group with span 0-2 is the obvious fail. With patch this looks like:

[] CPU1 attaching sched-domain:
[] domain 0: span 0-2 level NUMA
[] groups: 1 0 2
[] domain 1: span 0-3 level NUMA
[] groups: 0-2 (cpu_capacity = 3072) 0,2-3 (cpu_capacity = 3072)

Debugged-by: Lauro Ramos Venancio Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Fixes: e3589f6c81e4 ("sched: Allow for overlapping sched_domain spans") Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 737381d..19f84d9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6148,7 +6148,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) cpumask_clear(covered); - for_each_cpu(i, span) { + for_each_cpu_wrap(i, span, cpu) { struct cpumask *sg_span; if (cpumask_test_cpu(i, covered)) -- cgit v0.10.2 From 3e165b2322c6a7e50031cb91ef95ee6cb4e1d163 Mon Sep 17 00:00:00 2001 From: Lauro Ramos Venancio Date: Thu, 20 Apr 2017 16:51:40 -0300 Subject: sched/topology: Optimize build_group_mask() commit f32d782e31bf079f600dcec126ed117b0577e85c upstream. The group mask is always used in intersection with the group CPUs. So, when building the group mask, we don't have to care about CPUs that are not part of the group.
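The for_each_cpu_wrap() iterator promoted to the generic cpumask API above, and relied on by the scheduler fixes in this series, can be exercised on its own; a minimal usage sketch (hypothetical context, not taken from the patches):

int cpu;

/* Visits every CPU in the mask exactly once, but starts the scan at the
 * current CPU and wraps around, so concurrent callers on different CPUs
 * do not all hammer CPU 0 first. */
for_each_cpu_wrap(cpu, cpu_online_mask, raw_smp_processor_id()) {
	/* ... inspect or poke 'cpu' ... */
}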
Signed-off-by: Lauro Ramos Venancio Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: lwang@redhat.com Cc: riel@redhat.com Link: http://lkml.kernel.org/r/1492717903-5195-2-git-send-email-lvenanci@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 19f84d9..326be9d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6113,12 +6113,12 @@ enum s_alloc { */ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) { - const struct cpumask *span = sched_domain_span(sd); + const struct cpumask *sg_span = sched_group_cpus(sg); struct sd_data *sdd = sd->private; struct sched_domain *sibling; int i; - for_each_cpu(i, span) { + for_each_cpu(i, sg_span) { sibling = *per_cpu_ptr(sdd->sd, i); if (!cpumask_test_cpu(i, sched_domain_span(sibling))) continue; -- cgit v0.10.2 From 758dc6a8dabc0c533e3aff8a2095b9de8d597768 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Apr 2017 14:00:49 +0200 Subject: sched/topology: Fix overlapping sched_group_mask commit 73bb059f9b8a00c5e1bf2f7ca83138c05d05e600 upstream. The point of sched_group_mask is to select those CPUs from sched_group_cpus that can actually arrive at this balance domain. The current code gets it wrong, as can be readily demonstrated with a topology like:

  node   0   1   2   3
    0:  10  20  30  20
    1:  20  10  20  30
    2:  30  20  10  20
    3:  20  30  20  10

Where (for example) domain 1 on CPU1 ends up with a mask that includes CPU0:

[] CPU1 attaching sched-domain:
[] domain 0: span 0-2 level NUMA
[] groups: 1 (mask: 1), 2, 0
[] domain 1: span 0-3 level NUMA
[] groups: 0-2 (mask: 0-2) (cpu_capacity: 3072), 0,2-3 (cpu_capacity: 3072)

This causes sched_balance_cpu() to compute the wrong CPU and consequently should_we_balance() will terminate early resulting in missed load-balance opportunities. The fixed topology looks like:

[] CPU1 attaching sched-domain:
[] domain 0: span 0-2 level NUMA
[] groups: 1 (mask: 1), 2, 0
[] domain 1: span 0-3 level NUMA
[] groups: 0-2 (mask: 1) (cpu_capacity: 3072), 0,2-3 (cpu_capacity: 3072)

(note: this relies on OVERLAP domains to always have children, this is true because the regular topology domains are still here -- this is before degenerate trimming) Debugged-by: Lauro Ramos Venancio Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Fixes: e3589f6c81e4 ("sched: Allow for overlapping sched_domain spans") Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 326be9d..d177b21 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6102,6 +6102,9 @@ enum s_alloc { * Build an iteration mask that can exclude certain CPUs from the upwards * domain traversal. * + * Only CPUs that can arrive at this group should be considered to continue + * balancing. + * * Asymmetric node setups can result in situations where the domain tree is of * unequal depth, make sure to skip domains that already cover the entire * range. @@ -6120,11 +6123,24 @@ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) for_each_cpu(i, sg_span) { sibling = *per_cpu_ptr(sdd->sd, i); - if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + + /* + * Can happen in the asymmetric case, where these siblings are + * unused.
The mask will not be empty because those CPUs that + * do have the top domain _should_ span the domain. + */ + if (!sibling->child) + continue; + + /* If we would not end up here, we can't continue from here */ + if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) continue; cpumask_set_cpu(i, sched_group_mask(sg)); } + + /* We must not have empty masks here */ + WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg))); } /* -- cgit v0.10.2 From 5480437f7963999c7634ccc1f9057c87ea8e198f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 25 Jun 2017 19:31:13 +0200 Subject: PM / wakeirq: Convert to SRCU commit ea0212f40c6bc0594c8eff79266759e3ecd4bacc upstream. The wakeirq infrastructure uses RCU to protect the list of wakeirqs. That breaks the irq bus locking infrastructure, which allows sleeping functions to be called so that interrupt controllers behind slow busses, e.g. i2c, can be handled. The wakeirq functions hold rcu_read_lock and call into irq functions, which in the case of interrupts using the irq bus locking will trigger a might_sleep() splat. Convert the wakeirq infrastructure to Sleepable RCU and unbreak it. Fixes: 4990d4fe327b (PM / Wakeirq: Add automated device wake IRQ handling) Reported-by: Brian Norris Suggested-by: Paul E. McKenney Signed-off-by: Thomas Gleixner Reviewed-by: Paul E. McKenney Tested-by: Tony Lindgren Tested-by: Brian Norris Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 62e4de2..f98121f 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +DEFINE_STATIC_SRCU(wakeup_srcu); + static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), @@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws) spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); - synchronize_rcu(); + synchronize_srcu(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev) void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void) void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -805,10 +807,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_event); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; - int active = 0; + int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); @@ -824,7 +826,7 @@ void pm_print_active_wakeup_sources(void) if (!active && last_activity_ws) pr_info("last active wakeup source: %s\n", last_activity_ws->name); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); }
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); @@ -951,8 +953,9 @@ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { @@ -966,7 +969,7 @@ void pm_wakep_autosleep_enabled(bool set) } spin_unlock_irq(&ws->lock); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ @@ -1027,15 +1030,16 @@ static int print_wakeup_source_stats(struct seq_file *m, static int wakeup_sources_stats_show(struct seq_file *m, void *unused) { struct wakeup_source *ws; + int srcuidx; seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) print_wakeup_source_stats(m, ws); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); print_wakeup_source_stats(m, &deleted_ws); -- cgit v0.10.2 From cc7d3b7dd1ac43bf4643d6d3e924f4b9f0e8da05 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Jul 2017 10:21:40 +0300 Subject: PM / QoS: return -EINVAL for bogus strings commit 2ca30331c156ca9e97643ad05dd8930b8fe78b01 upstream. In the current code, if the user accidentally writes a bogus command to this sysfs file, then we set the latency tolerance to an uninitialized variable. Fixes: 2d984ad132a8 (PM / QoS: Introcuce latency tolerance device PM QoS type) Signed-off-by: Dan Carpenter Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a7b4679..39efa7e 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? ret : n; -- cgit v0.10.2 From 04e002a5f681590d4b3d9aceb4aeb5526ce162e6 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Fri, 9 Dec 2016 21:50:17 +0530 Subject: tracing: Use SOFTIRQ_OFFSET for softirq detection for more accurate results commit c59f29cb144a6a0dfac16ede9dc8eafc02dc56ca upstream. The 's' flag is supposed to indicate that a softirq is running. This can be detected by testing the preempt_count with SOFTIRQ_OFFSET. The current code tests the preempt_count with SOFTIRQ_MASK, which would be true even when softirqs are disabled but not serving a softirq. Link: http://lkml.kernel.org/r/1481300417-3564-1-git-send-email-pkondeti@codeaurora.org Signed-off-by: Pavankumar Kondeti Signed-off-by: Steven Rostedt Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 83c60f9..52ee2c5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1906,7 +1906,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, #endif ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | - ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); } -- cgit v0.10.2 From bf7c2153561772f58b64b702babc733216be5193 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 23 May 2017 11:52:52 -0700 Subject: kvm: vmx: Do not disable intercepts for BNDCFGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a8b6fda38f80e75afa3b125c9e7f2550b579454b upstream. The MSR permission bitmaps are shared by all VMs. However, some VMs may not be configured to support MPX, even when the host does. If the host supports MPX and the guest does not, we should intercept accesses to the BNDCFGS MSR, so that we can synthesize a #GP fault. Furthermore, if the host does not support MPX and the "ignore_msrs" kvm kernel parameter is set, then we should intercept accesses to the BNDCFGS MSR, so that we can skip over the rdmsr/wrmsr without raising a #GP fault. Fixes: da8999d31818fdc8 ("KVM: x86: Intel MPX vmx and msr handle") Signed-off-by: Jim Mattson Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 04e6bbb..04d8bde 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6474,7 +6474,6 @@ static __init int hardware_setup(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); -- cgit v0.10.2 From fab777e70ca46898422f6b62de08f20d86952ed2 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 24 May 2017 10:49:25 -0700 Subject: kvm: x86: Guest BNDCFGS requires guest MPX support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4439af9f911ae0243ffe4e2dfc12bace49605d8b upstream. The BNDCFGS MSR should only be exposed to the guest if the guest supports MPX. (cf. the TSC_AUX MSR and RDTSCP.)
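The gating added by the patch below is, in essence, a CPUID lookup: MPX is advertised in CPUID.(EAX=7,ECX=0):EBX[bit 14]. A reformatted sketch of the helper the patch introduces:

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}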
From bf7c2153561772f58b64b702babc733216be5193 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Tue, 23 May 2017 11:52:52 -0700
Subject: kvm: vmx: Do not disable intercepts for BNDCFGS
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit a8b6fda38f80e75afa3b125c9e7f2550b579454b upstream.

The MSR permission bitmaps are shared by all VMs. However, some VMs
may not be configured to support MPX, even when the host does. If the
host supports MPX and the guest does not, we should intercept accesses
to the BNDCFGS MSR, so that we can synthesize a #GP fault. Furthermore,
if the host does not support MPX and the "ignore_msrs" kvm kernel
parameter is set, then we should intercept accesses to the BNDCFGS MSR,
so that we can skip over the rdmsr/wrmsr without raising a #GP fault.

Fixes: da8999d31818fdc8 ("KVM: x86: Intel MPX vmx and msr handle")
Signed-off-by: Jim Mattson
Signed-off-by: Radim Krčmář
Signed-off-by: Greg Kroah-Hartman

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04e6bbb..04d8bde 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6474,7 +6474,6 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
-- cgit v0.10.2

From fab777e70ca46898422f6b62de08f20d86952ed2 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Wed, 24 May 2017 10:49:25 -0700
Subject: kvm: x86: Guest BNDCFGS requires guest MPX support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 4439af9f911ae0243ffe4e2dfc12bace49605d8b upstream.

The BNDCFGS MSR should only be exposed to the guest if the guest
supports MPX. (cf. the TSC_AUX MSR and RDTSCP.)

Fixes: 0dd376e709975779 ("KVM: x86: add MSR_IA32_BNDCFGS to msrs_to_save")
Change-Id: I3ad7c01bda616715137ceac878f3fa7e66b6b387
Signed-off-by: Jim Mattson
Signed-off-by: Radim Krčmář
Signed-off-by: Greg Kroah-Hartman

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2..9368fec 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04d8bde..e64cde7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2987,7 +2987,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -3069,7 +3069,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
-- cgit v0.10.2

From 07592d6225365e26777788c750dc2770d8e501b7 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Tue, 23 May 2017 11:52:54 -0700
Subject: kvm: vmx: Check value written to IA32_BNDCFGS
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 4531662d1abf6c1f0e5c2b86ddb60e61509786c8 upstream.

Bits 11:2 must be zero and the linear address in bits 63:12 must be
canonical. Otherwise, WRMSR(BNDCFGS) should raise #GP.

Fixes: 0dd376e709975779 ("KVM: x86: add MSR_IA32_BNDCFGS to msrs_to_save")
Signed-off-by: Jim Mattson
Signed-off-by: Radim Krčmář
Signed-off-by: Greg Kroah-Hartman

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760..b601dda 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST		0x0000003b
 #define MSR_IA32_BNDCFGS		0x00000d90
 
+#define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
+
 #define MSR_IA32_XSS			0x00000da0
 
 #define FEATURE_CONTROL_LOCKED		(1<<0)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e64cde7..4acb3cd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3071,6 +3071,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_BNDCFGS:
 		if (!kvm_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
 			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
+			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
 	case MSR_IA32_TSC:
-- cgit v0.10.2
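To make the reserved-bit and canonicality rules concrete: IA32_BNDCFGS holds the bound-directory base address in bits 63:12, the EN and BNDPRESERVE bits in bits 1:0, and reserved bits 11:2 (hence the 0xffc mask). Below is a standalone restatement of the same validation; the 48-bit canonicality test is a simplified stand-in for the kernel's is_noncanonical_address(), and the helper names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>

#define BNDCFGS_RSVD	0x0000000000000ffcULL	/* reserved bits 11:2 */
#define PAGE_MASK_4K	(~0xfffULL)		/* keep base, bits 63:12 */

static bool is_noncanonical48(uint64_t addr)
{
	uint64_t upper = addr >> 47;	/* bits 63:47 must be all 0 or all 1 */

	return upper != 0 && upper != 0x1ffff;
}

static bool bndcfgs_value_ok(uint64_t data)
{
	if (data & BNDCFGS_RSVD)		/* #GP: reserved bits set */
		return false;
	if (is_noncanonical48(data & PAGE_MASK_4K))
		return false;			/* #GP: bad base address */
	return true;	/* EN/BNDPRESERVE in bits 1:0 are legal */
}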
From cce8d2ee45715c1cc82609c885110656b038f51a Mon Sep 17 00:00:00 2001
From: Haozhong Zhang
Date: Tue, 4 Jul 2017 10:27:41 +0800
Subject: kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS

commit 691bd4340bef49cf7e5855d06cf24444b5bf2d85 upstream.

It's easier for host applications, such as QEMU, if they can always
access guest MSR_IA32_BNDCFGS in VMCS, even though MPX is disabled in
guest cpuid.

Signed-off-by: Haozhong Zhang
Signed-off-by: Paolo Bonzini
Signed-off-by: Greg Kroah-Hartman

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4acb3cd..3dc6d80 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2987,7 +2987,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -3069,7 +3070,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
 			return 1;
 		if (is_noncanonical_address(data & PAGE_MASK) ||
 		    (data & MSR_IA32_BNDCFGS_RSVD))
-- cgit v0.10.2

From c03917de04aa68017a737e90ea01338d991eaff5 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Fri, 21 Jul 2017 07:42:36 +0200
Subject: 4.9.39

diff --git a/Makefile b/Makefile
index ad0c045..a872ece 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 38
+SUBLEVEL = 39
 EXTRAVERSION =
 NAME = Roaring Lionus
-- cgit v0.10.2
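Taken together, the three BNDCFGS patches in this release converge on a single access policy in vmx_get_msr()/vmx_set_msr(). The sketch below is illustrative only: this helper does not exist in the kernel; it merely restates the write-side checks the patches add, using the identifiers they introduce.

/* Condensed, illustrative restatement of the vmx_set_msr() gate for
 * MSR_IA32_BNDCFGS after the three patches above; returns nonzero to
 * signal a #GP, mirroring the kernel convention.
 */
static int bndcfgs_write_blocked(struct kvm_vcpu *vcpu,
				 struct msr_data *msr_info)
{
	/* No MPX on the host: the MSR does not exist at all. */
	if (!kvm_mpx_supported())
		return 1;
	/* Guest accesses need MPX in guest CPUID; host-initiated
	 * accesses (e.g. QEMU state save/restore) bypass that check.
	 */
	if (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))
		return 1;
	/* Reserved bits 11:2 must be clear and the base canonical. */
	if (is_noncanonical_address(msr_info->data & PAGE_MASK) ||
	    (msr_info->data & MSR_IA32_BNDCFGS_RSVD))
		return 1;
	return 0;	/* value may be written to GUEST_BNDCFGS */
}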