From 98184bbb8daea6af32208d63831e66023db4bb58 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 18 Feb 2017 19:00:45 -0500 Subject: ipv6: release dst on error in ip6_dst_lookup_tail commit 00ea1ceebe0d9f2dc1cc2b7bd575a00100c27869 upstream. If ip6_dst_lookup_tail has acquired a dst and fails the IPv4-mapped check, release the dst before returning an error. Fixes: ec5e3b0a1d41 ("ipv6: Inhibit IPv4-mapped src address on the wire.") Signed-off-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. Miller Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3ab32ac..fd64959 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1020,8 +1020,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, } #endif if (ipv6_addr_v4mapped(&fl6->saddr) && - !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) - return -EAFNOSUPPORT; + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { + err = -EAFNOSUPPORT; + goto out_err_release; + } return 0; -- cgit v0.10.2 From d2f459e3feb0f73d2e95ab7892adcf22f21fe9ef Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 6 Jun 2017 15:56:54 +0200 Subject: net: don't call strlen on non-terminated string in dev_set_alias() [ Upstream commit c28294b941232931fbd714099798eb7aa7e865d7 ] KMSAN reported a use of uninitialized memory in dev_set_alias(), which was caused by calling strlcpy() (which in turn called strlen()) on the user-supplied non-terminated string. Signed-off-by: Alexander Potapenko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/core/dev.c b/net/core/dev.c index 2e04fd1..97f8061 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1250,8 +1250,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; + memcpy(dev->ifalias, alias, len); + dev->ifalias[len] = 0; - strlcpy(dev->ifalias, alias, len+1); return len; } -- cgit v0.10.2 From fd9b13e6c175b01d61f0f234502919c6c40e4dd2 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Wed, 7 Jun 2017 16:14:29 +0200 Subject: decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb [ Upstream commit dd0da17b209ed91f39872766634ca967c170ada1 ] Verify that the length of the socket buffer is sufficient to cover the nlmsghdr structure before accessing the nlh->nlmsg_len field for further input sanitization. If the client only supplies 1-3 bytes of data in sk_buff, then nlh->nlmsg_len remains partially uninitialized and contains leftover memory from the corresponding kernel allocation. Operating on such data may result in indeterminate evaluation of the nlmsg_len < sizeof(*nlh) expression. The bug was discovered by a runtime instrumentation designed to detect use of uninitialized memory in the kernel. The patch prevents this and other similar tools (e.g. KMSAN) from flagging this behavior in the future. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 85f2fdc..29246bc 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) + if (skb->len < sizeof(*nlh) || + nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) -- cgit v0.10.2 From b5cc68e0c1905a3cb94677a4d1b3e03f65881231 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Wed, 7 Jun 2017 21:00:33 +0300 Subject: net: Zero ifla_vf_info in rtnl_fill_vfinfo() [ Upstream commit 0eed9cf58446b28b233388b7f224cbca268b6986 ] Some of the structure's fields are not initialized by the rtnetlink. If driver doesn't set those in ndo_get_vf_config(), they'd leak memory to user. Signed-off-by: Yuval Mintz CC: Michal Schmidt Reviewed-by: Greg Rose Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1d91607..d574409 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1130,6 +1130,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct ifla_vf_mac vf_mac; struct ifla_vf_info ivi; + memset(&ivi, 0, sizeof(ivi)); + /* Not all SR-IOV capable drivers support the * spoofcheck and "RSS query enable" query. Preset to * -1 so the user space tool can detect that the driver @@ -1138,7 +1140,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ivi.spoofchk = -1; ivi.rss_query_en = -1; ivi.trusted = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero */ -- cgit v0.10.2 From 386ed38f0f28b5dffe11c5665997882115fb788e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 8 Jun 2017 11:31:11 -0600 Subject: net: vrf: Make add_fib_rules per network namespace flag [ Upstream commit 097d3c9508dc58286344e4a22b300098cf0c1566 ] Commit 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") adds the l3mdev FIB rule the first time a VRF device is created. However, it only creates the rule once and only in the namespace the first device is created - which may not be init_net. Fix by using the net_generic capability to make the add_fib_rules flag per network namespace. Fixes: 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") Reported-by: Petr Machata Signed-off-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ee02605..642df93 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -36,12 +36,14 @@ #include #include #include +#include #define DRV_NAME "vrf" #define DRV_VERSION "1.0" #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ -static bool add_fib_rules = true; + +static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; @@ -1237,6 +1239,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); + bool *add_fib_rules; + struct net *net; int err; if (!data || !data[IFLA_VRF_TABLE]) @@ -1252,13 +1256,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, if (err) goto out; - if (add_fib_rules) { + net = dev_net(dev); + add_fib_rules = net_generic(net, vrf_net_id); + if (*add_fib_rules) { err = vrf_add_fib_rules(dev); if (err) { unregister_netdevice(dev); goto out; } - add_fib_rules = false; + *add_fib_rules = false; } out: @@ -1341,16 +1347,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = { .notifier_call = vrf_device_event, }; +/* Initialize per network namespace state */ +static int __net_init vrf_netns_init(struct net *net) +{ + bool *add_fib_rules = net_generic(net, vrf_net_id); + + *add_fib_rules = true; + + return 0; +} + +static struct pernet_operations vrf_net_ops __net_initdata = { + .init = vrf_netns_init, + .id = &vrf_net_id, + .size = sizeof(bool), +}; + static int __init vrf_init_module(void) { int rc; register_netdevice_notifier(&vrf_notifier_block); - rc = rtnl_link_register(&vrf_link_ops); + rc = register_pernet_subsys(&vrf_net_ops); if (rc < 0) goto error; + rc = rtnl_link_register(&vrf_link_ops); + if (rc < 0) { + unregister_pernet_subsys(&vrf_net_ops); + goto error; + } + return 0; error: -- cgit v0.10.2 From bb84290cd2967a5774a97fa44381713e20a7924c Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Thu, 8 Jun 2017 11:13:36 +0200 Subject: af_unix: Add sockaddr length checks before accessing sa_family in bind and connect handlers [ Upstream commit defbcf2decc903a28d8398aa477b6881e711e3ea ] Verify that the caller-provided sockaddr structure is large enough to contain the sa_family field, before accessing it in bind() and connect() handlers of the AF_UNIX socket. Since neither syscall enforces a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing .sa_family. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2d03d5b..915abe9 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -998,7 +998,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct path path = { NULL, NULL }; err = -EINVAL; - if (sunaddr->sun_family != AF_UNIX) + if (addr_len < offsetofend(struct sockaddr_un, sun_family) || + sunaddr->sun_family != AF_UNIX) goto out; if (addr_len == sizeof(short)) { @@ -1109,6 +1110,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unsigned int hash; int err; + err = -EINVAL; + if (alen < offsetofend(struct sockaddr, sa_family)) + goto out; + if (addr->sa_family != AF_UNSPEC) { err = unix_mkname(sunaddr, alen, &hash); if (err < 0) -- cgit v0.10.2 From c6d4ff85722b25877af48b311eda944dcc8c6feb Mon Sep 17 00:00:00 2001 From: Krister Johansen Date: Thu, 8 Jun 2017 13:12:38 -0700 Subject: Fix an intermittent pr_emerg warning about lo becoming free. [ Upstream commit f186ce61bb8235d80068c390dc2aad7ca427a4c2 ] It looks like this: Message from syslogd@flamingo at Apr 26 00:45:00 ... kernel:unregister_netdevice: waiting for lo to become free. Usage count = 4 They seem to coincide with net namespace teardown. The message is emitted by netdev_wait_allrefs(). Forced a kdump in netdev_run_todo, but found that the refcount on the lo device was already 0 at the time we got to the panic. Used bcc to check the blocking in netdev_run_todo. The only places where we're off cpu there are in the rcu_barrier() and msleep() calls. That behavior is expected. The msleep time coincides with the amount of time we spend waiting for the refcount to reach zero; the rcu_barrier() wait times are not excessive. After looking through the list of callbacks that the netdevice notifiers invoke in this path, it appears that the dst_dev_event is the most interesting. The dst_ifdown path places a hold on the loopback_dev as part of releasing the dev associated with the original dst cache entry. Most of our notifier callbacks are straight-forward, but this one a) looks complex, and b) places a hold on the network interface in question. I constructed a new bcc script that watches various events in the liftime of a dst cache entry. Note that dst_ifdown will take a hold on the loopback device until the invalidated dst entry gets freed. [ __dst_free] on DST: ffff883ccabb7900 IF tap1008300eth0 invoked at 1282115677036183 __dst_free rcu_nocb_kthread kthread ret_from_fork Acked-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/core/dst.c b/net/core/dst.c index 656b70d..39cc119 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -470,6 +470,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, spin_lock_bh(&dst_garbage.lock); dst = dst_garbage.list; dst_garbage.list = NULL; + /* The code in dst_ifdown places a hold on the loopback device. + * If the gc entry processing is set to expire after a lengthy + * interval, this hold can cause netdev_wait_allrefs() to hang + * out and wait for a long time -- until the the loopback + * interface is released. If we're really unlucky, it'll emit + * pr_emerg messages to console too. Reset the interval here, + * so dst cleanups occur in a more timely fashion. 
+ */ + if (dst_garbage.timer_inc > DST_GC_INC) { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + mod_delayed_work(system_wq, &dst_gc_work, + dst_garbage.timer_expires); + } spin_unlock_bh(&dst_garbage.lock); if (last) -- cgit v0.10.2 From 8cda426a7cfa61b902c4335d1d1ab945bbcb41b6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 10 Jun 2017 14:48:14 +0800 Subject: sctp: disable BH in sctp_for_each_endpoint [ Upstream commit 581409dacc9176b0de1f6c4ca8d66e13aa8e1b29 ] Now sctp holds read_lock when foreach sctp_ep_hashtable without disabling BH. If CPU schedules to another thread A at this moment, the thread A may be trying to hold the write_lock with disabling BH. As BH is disabled and CPU cannot schedule back to the thread holding the read_lock, while the thread A keeps waiting for the read_lock. A dead lock would be triggered by this. This patch is to fix this dead lock by calling read_lock_bh instead to disable BH when holding the read_lock in sctp_for_each_endpoint. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Reported-by: Xiumei Mu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e1719c6..0c5257e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4460,13 +4460,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { - read_lock(&head->lock); + read_lock_bh(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } - read_unlock(&head->lock); + read_unlock_bh(&head->lock); } return err; -- cgit v0.10.2 From bb566ce3a60eded40ae4a3421a59c0f5f1c7ef20 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 16:49:39 +0800 Subject: net: caif: Fix a sleep-in-atomic bug in cfpkt_create_pfx [ Upstream commit f146e872eb12ebbe92d8e583b2637e0741440db3 ] The kernel may sleep under a rcu read lock in cfpkt_create_pfx, and the function call path is: cfcnfg_linkup_rsp (acquire the lock by rcu_read_lock) cfctrl_linkdown_req cfpkt_create cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep cfserl_receive (acquire the lock by rcu_read_lock) cfpkt_split cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep There is "in_interrupt" in cfpkt_create_pfx to decide use "GFP_KERNEL" or "GFP_ATOMIC". In this situation, "GFP_KERNEL" is used because the function is called under a rcu read lock, instead in interrupt. To fix it, only "GFP_ATOMIC" is used in cfpkt_create_pfx. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 59ce1fc..71b6ab2 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; - if (likely(in_interrupt())) - skb = alloc_skb(len + pfx, GFP_ATOMIC); - else - skb = alloc_skb(len + pfx, GFP_KERNEL); - + skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; -- cgit v0.10.2 From 57360bc3c7a6fc9c7422e422508bf77166a05028 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 17:03:35 +0800 Subject: net: tipc: Fix a sleep-in-atomic bug in tipc_msg_reverse [ Upstream commit 343eba69c6968190d8654b857aea952fed9a6749 ] The kernel may sleep under a rcu read lock in tipc_msg_reverse, and the function call path is: tipc_l2_rcv_msg (acquire the lock by rcu_read_lock) tipc_rcv tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep tipc_node_broadcast tipc_node_xmit_skb tipc_node_xmit tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep To fix it, "GFP_KERNEL" is replaced with "GFP_ATOMIC". Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 17201aa..1bd9817 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) } if (skb_cloned(_skb) && - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) goto exit; /* Now reverse the concerned fields */ -- cgit v0.10.2 From 9854e58659908b4923d95b0fe3cd1db7ea62fe39 Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 15 May 2017 14:13:16 +0300 Subject: net/mlx5e: Added BW check for DIM decision mechanism [ Upstream commit c3164d2fc48fd4fa0477ab658b644559c3fe9073 ] DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Until now only interrupt and packet rate were sampled. We found a scenario on which we get a false indication since a change in DIM caused more aggregation and reduced packet rate while increasing BW. 
We now regard a change as succesfull iff: current_BW > (prev_BW + threshold) or current_BW ~= prev_BW and current_PR > (prev_PR + threshold) or current_BW ~= prev_BW and current_PR ~= prev_PR and current_IR < (prev_IR - threshold) Where BW = Bandwidth, PR = Packet rate and IR = Interrupt rate Improvements (ConnectX-4Lx 25GbE, single RX queue, LRO off) -------------------------------------------------- packet size | before[Mb/s] | after[Mb/s] | gain | 2B | 343.4 | 359.4 | 4.5% | 16B | 2739.7 | 2814.8 | 2.7% | 64B | 9739 | 10185.3 | 4.5% | Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 21ce0b7..6edc8b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -283,12 +283,14 @@ struct mlx5e_dma_info { struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ int epms; /* events per msec */ }; struct mlx5e_rx_am_sample { ktime_t time; unsigned int pkt_ctr; + unsigned int byte_ctr; u16 event_ctr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index cbfac06..1750388 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) mlx5e_am_step(am); } +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, struct mlx5e_rx_am_stats *prev) { - int diff; - - if (!prev->ppms) - return curr->ppms ? MLX5E_AM_STATS_BETTER : + if (!prev->bpms) + return curr->bpms ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_SAME; - diff = curr->ppms - prev->ppms; - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ - return (diff > 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - if (!prev->epms) - return curr->epms ? MLX5E_AM_STATS_WORSE : - MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - diff = curr->epms - prev->epms; - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ - return (diff < 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; return MLX5E_AM_STATS_SAME; } @@ -266,6 +265,7 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, { s->time = ktime_get(); s->pkt_ctr = rq->stats.packets; + s->byte_ctr = rq->stats.bytes; s->event_ctr = rq->cq.event_ctr; } @@ -278,12 +278,15 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + unsigned int nbytes = end->byte_ctr - start->byte_ctr; if (!delta_us) return; - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, + delta_us); } void mlx5e_rx_am_work(struct work_struct *work) -- cgit v0.10.2 From 78b24ab695abafe4c5754a661a591b841661df8b Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 29 May 2017 17:02:55 +0300 Subject: net/mlx5e: Fix wrong indications in DIM due to counter wraparound [ Upstream commit 53acd76ce571e3b71f9205f2d49ab285a9f1aad8 ] DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Each iteration of the algorithm, DIM calculates the difference in throughput, packet rate and interrupt rate from last iteration in order to make a decision. DIM relies on counters for each metric. When these counters get to their type's max value they wraparound. In this case the delta between 'end' and 'start' samples is negative and when translated to unsigned integers - very high. This results in a false indication to the algorithm and might result in a wrong decision. The fix calculates the 'distance' between 'end' and 'start' samples in a cyclic way around the relevant type's max value. It can also be viewed as an absolute value around the type's max value instead of around 0. Testing show higher stability in DIM profile selection and no wraparound issues. 
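For illustration only (not part of the patch), the cyclic-distance idea behind BIT_GAP() can be shown with a small standalone C snippet; the helper name below is made up for the example:

#include <stdio.h>
#include <stdint.h>

/* Distance from 'start' to 'end' around a 16-bit counter, in the same
 * spirit as BIT_GAP(16, end, start): add the counter's modulus and mask,
 * so a wrapped counter still yields the true (small) delta.
 */
static uint32_t u16_counter_gap(uint16_t end, uint16_t start)
{
	return ((uint32_t)end - (uint32_t)start + (1u << 16)) & ((1u << 16) - 1);
}

int main(void)
{
	/* Counter wrapped from 0xfff0 to 0x0010: the true distance is 32,
	 * whereas a plain widened subtraction would report a huge value.
	 */
	printf("%u\n", u16_counter_gap(0x0010, 0xfff0));	/* prints 32 */
	return 0;
}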
Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6edc8b2..6180ad4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -288,10 +288,10 @@ struct mlx5e_rx_am_stats { }; struct mlx5e_rx_am_sample { - ktime_t time; - unsigned int pkt_ctr; - unsigned int byte_ctr; - u16 event_ctr; + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; }; struct mlx5e_rx_am { /* Adaptive Moderation */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 1750388..23ccec4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -270,6 +270,8 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, } #define MLX5E_AM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, struct mlx5e_rx_am_sample *end, @@ -277,8 +279,9 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, { /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); - unsigned int npkts = end->pkt_ctr - start->pkt_ctr; - unsigned int nbytes = end->byte_ctr - start->byte_ctr; + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); if (!delta_us) return; @@ -311,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) switch (am->state) { case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, + am->start_sample.event_ctr); if (nevents < MLX5E_AM_NEVENTS) break; mlx5e_am_sample(rq, &end_sample); -- cgit v0.10.2 From 059686754c1870f182ce55495b81728763732d48 Mon Sep 17 00:00:00 2001 From: Christian Perle Date: Mon, 12 Jun 2017 10:06:57 +0200 Subject: proc: snmp6: Use correct type in memset [ Upstream commit 3500cd73dff48f28f4ba80c171c4c80034d40f76 ] Reading /proc/net/snmp6 yields bogus values on 32 bit kernels. Use "u64" instead of "unsigned long" in sizeof(). Fixes: 4a4857b1c81e ("proc: Reduce cache miss in snmp6_seq_show") Signed-off-by: Christian Perle Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index cc8e3ae..e88bcb8 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib, u64 buff64[SNMP_MIB_MAX]; int i; - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); for (i = 0; itemlist[i].name; i++) -- cgit v0.10.2 From ecd6627f48bd2d8e0f85eee703b5b4609ed6f744 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 12 Jun 2017 09:52:26 -0700 Subject: igmp: acquire pmc lock for ip_mc_clear_src() [ Upstream commit c38b7d327aafd1e3ad7ff53eefac990673b65667 ] Andrey reported a use-after-free in add_grec(): for (psf = *psf_list; psf; psf = psf_next) { ... 
psf_next = psf->sf_next; where the struct ip_sf_list's were already freed by: kfree+0xe8/0x2b0 mm/slub.c:3882 ip_mc_clear_src+0x69/0x1c0 net/ipv4/igmp.c:2078 ip_mc_dec_group+0x19a/0x470 net/ipv4/igmp.c:1618 ip_mc_drop_socket+0x145/0x230 net/ipv4/igmp.c:2609 inet_release+0x4e/0x1c0 net/ipv4/af_inet.c:411 sock_release+0x8d/0x1e0 net/socket.c:597 sock_close+0x16/0x20 net/socket.c:1072 This happens because we don't hold pmc->lock in ip_mc_clear_src() and a parallel mr_ifc_timer timer could jump in and access them. The RCU lock is there but it is merely for pmc itself, this spinlock could actually ensure we don't access them in parallel. Thanks to Eric and Long for discussion on this bug. Reported-by: Andrey Konovalov Cc: Eric Dumazet Cc: Xin Long Signed-off-by: Cong Wang Reviewed-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1bc623d..8520eff 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2071,21 +2071,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, static void ip_mc_clear_src(struct ip_mc_list *pmc) { - struct ip_sf_list *psf, *nextpsf; + struct ip_sf_list *psf, *nextpsf, *tomb, *sources; - for (psf = pmc->tomb; psf; psf = nextpsf) { + spin_lock_bh(&pmc->lock); + tomb = pmc->tomb; + pmc->tomb = NULL; + sources = pmc->sources; + pmc->sources = NULL; + pmc->sfmode = MCAST_EXCLUDE; + pmc->sfcount[MCAST_INCLUDE] = 0; + pmc->sfcount[MCAST_EXCLUDE] = 1; + spin_unlock_bh(&pmc->lock); + + for (psf = tomb; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->tomb = NULL; - for (psf = pmc->sources; psf; psf = nextpsf) { + for (psf = sources; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->sources = NULL; - pmc->sfmode = MCAST_EXCLUDE; - pmc->sfcount[MCAST_INCLUDE] = 0; - pmc->sfcount[MCAST_EXCLUDE] = 1; } /* Join a multicast group -- cgit v0.10.2 From cac2a9bb4034f2395bdbe1ad2bd3f29a470e14f0 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 20 Jun 2017 10:46:27 -0700 Subject: igmp: add a missing spin_lock_init() [ Upstream commit b4846fc3c8559649277e3e4e6b5cec5348a8d208 ] Andrey reported a lockdep warning on non-initialized spinlock: INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. CPU: 1 PID: 4099 Comm: a.out Not tainted 4.12.0-rc6+ #9 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 register_lock_class+0x717/0x1aa0 kernel/locking/lockdep.c:755 ? 0xffffffffa0000000 __lock_acquire+0x269/0x3690 kernel/locking/lockdep.c:3255 lock_acquire+0x22d/0x560 kernel/locking/lockdep.c:3855 __raw_spin_lock_bh ./include/linux/spinlock_api_smp.h:135 _raw_spin_lock_bh+0x36/0x50 kernel/locking/spinlock.c:175 spin_lock_bh ./include/linux/spinlock.h:304 ip_mc_clear_src+0x27/0x1e0 net/ipv4/igmp.c:2076 igmpv3_clear_delrec+0xee/0x4f0 net/ipv4/igmp.c:1194 ip_mc_destroy_dev+0x4e/0x190 net/ipv4/igmp.c:1736 We miss a spin_lock_init() in igmpv3_add_delrec(), probably because previously we never use it on this code path. Since we already unlink it from the global mc_tomb list, it is probably safe not to acquire this spinlock here. It does not harm to have it although, to avoid conditional locking. Fixes: c38b7d327aaf ("igmp: acquire pmc lock for ip_mc_clear_src()") Reported-by: Andrey Konovalov Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 8520eff..19930da 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return; + spin_lock_init(&pmc->lock); spin_lock_bh(&im->lock); pmc->interface = im->interface; in_dev_hold(in_dev); -- cgit v0.10.2 From fded2d74a3505f7daad70db4e8ffd87ceb366ecb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 16:33:58 +0800 Subject: ipv6: fix calling in6_ifa_hold incorrectly for dad work [ Upstream commit f8a894b218138888542a5058d0e902378fd0d4ec ] Now when starting the dad work in addrconf_mod_dad_work, if the dad work is idle and queued, it needs to hold ifa. The problem is there's one gap in [1], during which if the pending dad work is removed elsewhere. It will miss to hold ifa, but the dad word is still idea and queue. if (!delayed_work_pending(&ifp->dad_work)) in6_ifa_hold(ifp); <--------------[1] mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); An use-after-free issue can be caused by this. Chen Wei found this issue when WARN_ON(!hlist_unhashed(&ifp->addr_lst)) in net6_ifa_finish_destroy was hit because of it. As Hannes' suggestion, this patch is to fix it by holding ifa first in addrconf_mod_dad_work, then calling mod_delayed_work and putting ifa if the dad_work is already in queue. Note that this patch did not choose to fix it with: if (!mod_delayed_work(delay)) in6_ifa_hold(ifp); As with it, when delay == 0, dad_work would be scheduled immediately, all addrconf_mod_dad_work(0) callings had to be moved under ifp->lock. Reported-by: Wei Chen Suggested-by: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0457383..b6f4c42 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -315,9 +315,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, unsigned long delay) { - if (!delayed_work_pending(&ifp->dad_work)) - in6_ifa_hold(ifp); - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + in6_ifa_hold(ifp); + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) + in6_ifa_put(ifp); } static int snmp6_alloc_dev(struct inet6_dev *idev) -- cgit v0.10.2 From 4c246863e7b42eaecbaf90c319720bbf426b5958 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 17:49:08 +0800 Subject: sctp: return next obj by passing pos + 1 into sctp_transport_get_idx [ Upstream commit 988c7322116970696211e902b468aefec95b6ec4 ] In sctp_for_each_transport, pos is used to save how many objs it has dumped. Now it gets the last obj by sctp_transport_get_idx, then gets the next obj by sctp_transport_get_next. The issue is that in the meanwhile if some objs in transport hashtable are removed and the objs nums are less than pos, sctp_transport_get_idx would return NULL and hti.walker.tbl is NULL as well. At this moment it should stop hti, instead of continue getting the next obj. Or it would cause a NULL pointer dereference in sctp_transport_get_next. This patch is to pass pos + 1 into sctp_transport_get_idx to get the next obj directly, even if pos > objs nums, it would return NULL and stop hti. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Signed-off-by: Xin Long Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0c5257e..487c127 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4506,9 +4506,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), if (err) return err; - sctp_transport_get_idx(net, &hti, pos); - obj = sctp_transport_get_next(net, &hti); - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { + obj = sctp_transport_get_idx(net, &hti, pos + 1); + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { struct sctp_transport *transport = obj; if (!sctp_transport_hold(transport)) -- cgit v0.10.2 From 176b9874a203ae170912b063999e2c00d56b9ee6 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 15 Jun 2017 20:08:32 +0300 Subject: net/mlx5e: Avoid doing a cleanup call if the profile doesn't have it [ Upstream commit 31ac93386d135a6c96de9c8bab406f5ccabf5a4d ] The error flow of mlx5e_create_netdev calls the cleanup call of the given profile without checking if it exists, fix that. Currently the VF reps don't register that callback and we crash if getting into error -- can be reproduced by the user doing ctrl^C while attempting to change the sriov mode from legacy to switchdev. Fixes: 26e59d8077a3 '(net/mlx5e: Implement mlx5e interface attach/detach callbacks') Signed-off-by: Or Gerlitz Reported-by: Sabrina Dubroca Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d4fa851..ea58234 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3846,7 +3846,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return netdev; err_cleanup_nic: - profile->cleanup(priv); + if (profile->cleanup) + profile->cleanup(priv); free_netdev(netdev); return NULL; -- cgit v0.10.2 From 25ff35074e276b457f16c00f97afea41b6d5051d Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 8 Jun 2017 11:33:16 -0500 Subject: net/mlx5: Wait for FW readiness before initializing command interface [ Upstream commit 6c780a0267b8a1075f40b39851132eeaefefcff5 ] Before attempting to initialize the command interface we must wait till the fw_initializing bit is clear. If we fail to meet this condition the hardware will drop our configuration, specifically the descriptors page address. This scenario can happen when the firmware is still executing an FLR flow and did not finish yet so the driver needs to wait for that to finish. 
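For illustration only (not part of this patch), the general shape of such a bounded readiness poll, as a standalone user-space sketch with stand-in helpers rather than the driver's actual wait_fw_init():

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* Stand-in for reading the firmware's "still initializing" bit. */
static int polls_until_ready = 3;

static bool fw_is_initializing(void)
{
	return polls_until_ready-- > 0;	/* pretend the bit clears after a few polls */
}

/* Bounded poll: wait for the initializing bit to clear, and give up after
 * a timeout so a stuck firmware cannot hang the probe forever.
 */
static int wait_until_fw_ready(unsigned int timeout_ms, unsigned int poll_ms)
{
	unsigned int waited = 0;

	while (fw_is_initializing()) {
		if (waited >= timeout_ms)
			return -1;	/* abort the load, as the patch does */
		usleep(poll_ms * 1000);
		waited += poll_ms;
	}
	return 0;
}

int main(void)
{
	printf("ready: %d\n", wait_until_fw_ready(10000, 2));
	return 0;
}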
Fixes: e3297246c2c8 ('net/mlx5_core: Wait for FW readiness on startup') Signed-off-by: Eli Cohen Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d776db7..5bea0bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -155,8 +155,9 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 +#define FW_PRE_INIT_TIMEOUT_MILI 10000 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { @@ -956,6 +957,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, */ dev->state = MLX5_DEVICE_STATE_UP; + /* wait for firmware to accept initialization segments configurations + */ + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + goto out; + } + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); -- cgit v0.10.2 From c7d422d68fe98627ea9f60d06e38dc7f1af302b9 Mon Sep 17 00:00:00 2001 From: Maor Dickman Date: Thu, 18 May 2017 15:15:08 +0300 Subject: net/mlx5e: Fix timestamping capabilities reporting [ Upstream commit f0b381178b01b831f9907d72f467d6443afdea67 ] Misuse of (BIT) macro caused to report wrong flags for "Hardware Transmit Timestamp Modes" and "Hardware Receive Filter Modes" Fixes: ef9814deafd0 ('net/mlx5e: Add HW timestamping (TS) support') Signed-off-by: Maor Dickman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3744e2f..da1d73f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1183,11 +1183,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | - (BIT(1) << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | - (BIT(1) << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); return 0; } -- cgit v0.10.2 From f1a0e7d172b01e258a8c0ca6c67d003fbac54f64 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 16 Jun 2017 10:46:37 -0700 Subject: decnet: always not take dst->__refcnt when inserting dst into hash table [ Upstream commit 76371d2e3ad1f84426a30ebcd8c3b9b98f4c724f ] In the existing dn_route.c code, dn_route_output_slow() takes dst->__refcnt before calling dn_insert_route() while dn_route_input_slow() does not take dst->__refcnt before calling dn_insert_route(). This makes the whole routing code very buggy. In dn_dst_check_expire(), dnrt_free() is called when rt expires. This makes the routes inserted by dn_route_output_slow() not able to be freed as the refcnt is not released. In dn_dst_gc(), dnrt_drop() is called to release rt which could potentially cause the dst->__refcnt to be dropped to -1. In dn_run_flush(), dst_free() is called to release all the dst. 
Again, it makes the dst inserted by dn_route_output_slow() not able to be released and also, it does not wait on the rcu and could potentially cause crash in the path where other users still refer to this dst. This patch makes sure both input and output path do not take dst->__refcnt before calling dn_insert_route() and also makes sure dnrt_free()/dst_free() is called when removing dst from the hash table. The only difference between those 2 calls is that dnrt_free() waits on the rcu while dst_free() does not. Signed-off-by: Wei Wang Acked-by: Martin KaFai Lau Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index b1dc096..403593b 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } -static inline void dnrt_drop(struct dn_route *rt) -{ - dst_release(&rt->dst); - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); -} - static void dn_dst_check_expire(unsigned long dummy) { int i; @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) } *rtp = rt->dst.dn_next; rt->dst.dn_next = NULL; - dnrt_drop(rt); + dnrt_free(rt); break; } spin_unlock_bh(&dn_rt_hash_table[i].lock); @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou dst_use(&rth->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); - dnrt_drop(rt); + dst_free(&rt->dst); *rp = rth; return 0; } @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) for(; rt; rt = next) { next = rcu_dereference_raw(rt->dst.dn_next); RCU_INIT_POINTER(rt->dst.dn_next, NULL); - dst_free((struct dst_entry *)rt); + dnrt_free(rt); } nothing_to_declare: @@ -1187,7 +1181,7 @@ make_route: if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); if (rt == NULL) goto e_nobufs; -- cgit v0.10.2 From 08058c258afba77abf1fe6f4d327d3154a2bc336 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Fri, 16 Jun 2017 15:00:02 +0800 Subject: net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev [ Upstream commit 9745e362add89432d2c951272a99b0a5fe4348a9 ] The register_vlan_device would invoke free_netdev directly, when register_vlan_dev failed. It would trigger the BUG_ON in free_netdev if the dev was already registered. In this case, the netdev would be freed in netdev_run_todo later. So add one condition check now. Only when dev is not registered, then free it directly. The following is the part coredump when netdev_upper_dev_link failed in register_vlan_dev. I removed the lines which are too long. [ 411.237457] ------------[ cut here ]------------ [ 411.237458] kernel BUG at net/core/dev.c:7998! [ 411.237484] invalid opcode: 0000 [#1] SMP [ 411.237705] [last unloaded: 8021q] [ 411.237718] CPU: 1 PID: 12845 Comm: vconfig Tainted: G E 4.12.0-rc5+ #6 [ 411.237737] Hardware name: VMware, Inc. 
VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 411.237764] task: ffff9cbeb6685580 task.stack: ffffa7d2807d8000 [ 411.237782] RIP: 0010:free_netdev+0x116/0x120 [ 411.237794] RSP: 0018:ffffa7d2807dbdb0 EFLAGS: 00010297 [ 411.237808] RAX: 0000000000000002 RBX: ffff9cbeb6ba8fd8 RCX: 0000000000001878 [ 411.237826] RDX: 0000000000000001 RSI: 0000000000000282 RDI: 0000000000000000 [ 411.237844] RBP: ffffa7d2807dbdc8 R08: 0002986100029841 R09: 0002982100029801 [ 411.237861] R10: 0004000100029980 R11: 0004000100029980 R12: ffff9cbeb6ba9000 [ 411.238761] R13: ffff9cbeb6ba9060 R14: ffff9cbe60f1a000 R15: ffff9cbeb6ba9000 [ 411.239518] FS: 00007fb690d81700(0000) GS:ffff9cbebb640000(0000) knlGS:0000000000000000 [ 411.239949] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 411.240454] CR2: 00007f7115624000 CR3: 0000000077cdf000 CR4: 00000000003406e0 [ 411.240936] Call Trace: [ 411.241462] vlan_ioctl_handler+0x3f1/0x400 [8021q] [ 411.241910] sock_ioctl+0x18b/0x2c0 [ 411.242394] do_vfs_ioctl+0xa1/0x5d0 [ 411.242853] ? sock_alloc_file+0xa6/0x130 [ 411.243465] SyS_ioctl+0x79/0x90 [ 411.243900] entry_SYSCALL_64_fastpath+0x1e/0xa9 [ 411.244425] RIP: 0033:0x7fb69089a357 [ 411.244863] RSP: 002b:00007ffcd04e0fc8 EFLAGS: 00000202 ORIG_RAX: 0000000000000010 [ 411.245445] RAX: ffffffffffffffda RBX: 00007ffcd04e2884 RCX: 00007fb69089a357 [ 411.245903] RDX: 00007ffcd04e0fd0 RSI: 0000000000008983 RDI: 0000000000000003 [ 411.246527] RBP: 00007ffcd04e0fd0 R08: 0000000000000000 R09: 1999999999999999 [ 411.246976] R10: 000000000000053f R11: 0000000000000202 R12: 0000000000000004 [ 411.247414] R13: 00007ffcd04e1128 R14: 00007ffcd04e2888 R15: 0000000000000001 [ 411.249129] RIP: free_netdev+0x116/0x120 RSP: ffffa7d2807dbdb0 Signed-off-by: Gao Feng Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index f2531ad..8d213f9 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) return 0; out_free_newdev: - free_netdev(new_dev); + if (new_dev->reg_state == NETREG_UNINITIALIZED) + free_netdev(new_dev); return err; } -- cgit v0.10.2 From e4089baa08c4a1fba87c19f8d018ecf032cab0b5 Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Fri, 16 Jun 2017 09:45:08 +0100 Subject: sfc: provide dummy definitions of vswitch functions efx_probe_all() calls efx->type->vswitching_probe during probe. For SFC4000 (Falcon) NICs this function is not defined, leading to a BUG with the top of the call stack similar to: ? efx_pci_probe_main+0x29a/0x830 efx_pci_probe+0x7d3/0xe70 vswitching_restore and vswitching_remove also need to be defined. 
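For illustration only (not part of this patch), the general pattern of filling optional ops with harmless defaults instead of leaving NULL pointers, as a standalone sketch with made-up names:

#include <stdio.h>

/* An ops table whose optional hooks are filled with no-op defaults so the
 * core can call them unconditionally instead of crashing on NULL -- the
 * same shape as giving the SFC4000 dummy vswitching callbacks.
 */
struct nic_type_sketch {
	int  (*vswitching_probe)(void);
	void (*vswitching_remove)(void);
};

static int  dummy_op_int(void)  { return 0; }	/* "did nothing, successfully" */
static void dummy_op_void(void) { }

static const struct nic_type_sketch falcon_like = {
	.vswitching_probe  = dummy_op_int,
	.vswitching_remove = dummy_op_void,
};

int main(void)
{
	/* Safe to call even though this NIC has no vswitch support. */
	printf("vswitching_probe -> %d\n", falcon_like.vswitching_probe());
	falcon_like.vswitching_remove();
	return 0;
}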
Fixed in mainline by: commit 5a6681e22c14 ("sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver") Fixes: 6d8aaaf6f798 ("sfc: create VEB vswitch and vport above default firmware setup") Signed-off-by: Bert Kenward Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 1a70926..1bfb214 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2801,6 +2801,11 @@ const struct efx_nic_type falcon_a1_nic_type = { .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, .mcdi_max_ver = -1, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; const struct efx_nic_type falcon_b0_nic_type = { @@ -2902,4 +2907,9 @@ const struct efx_nic_type falcon_b0_nic_type = { .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .mcdi_max_ver = -1, .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; -- cgit v0.10.2 From b9ca9b0f551080aeb5adf7ab1b5f0c47c3e83f57 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 13:29:25 +0300 Subject: ipv6: Do not leak throw route references [ Upstream commit 07f615574f8ac499875b21c1142f26308234a92c ] While commit 73ba57bfae4a ("ipv6: fix backtracking for throw routes") does good job on error propagation to the fib_rules_lookup() in fib rules core framework that also corrects throw routes handling, it does not solve route reference leakage problem happened when we return -EAGAIN to the fib_rules_lookup() and leave routing table entry referenced in arg->result. If rule with matched throw route isn't last matched in the list we overwrite arg->result losing reference on throw route stored previously forever. We also partially revert commit ab997ad40839 ("ipv6: fix the incorrect return value of throw route") since we never return routing table entry with dst.error == -EAGAIN when CONFIG_IPV6_MULTIPLE_TABLES is on. Also there is no point to check for RTF_REJECT flag since it is always set throw route. Fixes: 73ba57bfae4a ("ipv6: fix backtracking for throw routes") Signed-off-by: Serhey Popovych Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index eea23b5..ec849d8 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -32,7 +32,6 @@ struct fib6_rule { struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { - struct rt6_info *rt; struct fib_lookup_arg arg = { .lookup_ptr = lookup, .flags = FIB_LOOKUP_NOREF, @@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, fib_rules_lookup(net->ipv6.fib6_rules_ops, flowi6_to_flowi(fl6), flags, &arg); - rt = arg.result; + if (arg.result) + return arg.result; - if (!rt) { - dst_hold(&net->ipv6.ip6_null_entry->dst); - return &net->ipv6.ip6_null_entry->dst; - } - - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { - ip6_rt_put(rt); - rt = net->ipv6.ip6_null_entry; - dst_hold(&rt->dst); - } - - return &rt->dst; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return &net->ipv6.ip6_null_entry->dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, @@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, flp6->saddr = saddr; } err = rt->dst.error; - goto out; + if (err != -EAGAIN) + goto out; } again: ip6_rt_put(rt); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 8c88a37..636d4d8 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, struct rt6_info *rt; rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { + if (rt->dst.error == -EAGAIN) { ip6_rt_put(rt); rt = net->ipv6.ip6_null_entry; dst_hold(&rt->dst); -- cgit v0.10.2 From 8e2316399b8faa87496886506f145ed988cf5c68 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 14:35:23 +0300 Subject: rtnetlink: add IFLA_GROUP to ifla_policy [ Upstream commit db833d40ad3263b2ee3b59a1ba168bb3cfed8137 ] Network interface groups support added while ago, however there is no IFLA_GROUP attribute description in policy and netlink message size calculations until now. Add IFLA_GROUP attribute to the policy. Fixes: cbda10fa97d7 ("net_device: add support for network device groups") Signed-off-by: Serhey Popovych Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d574409..9c6fd7f 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -937,6 +937,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + nla_total_size(4) /* IFLA_LINK_NETNSID */ + + nla_total_size(4) /* IFLA_GROUP */ + nla_total_size(ext_filter_mask & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ @@ -1465,6 +1466,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, [IFLA_XDP] = { .type = NLA_NESTED }, + [IFLA_GROUP] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { -- cgit v0.10.2 From ced7689be60ddcac4b1746212c547e8817c5ae5e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 3 Apr 2017 10:55:11 -0700 Subject: netfilter: xt_TCPMSS: add more sanity tests on tcph->doff commit 2638fd0f92d4397884fd991d8f4925cb3f081901 upstream. 
Denys provided an awesome KASAN report pointing to an use after free in xt_TCPMSS I have provided three patches to fix this issue, either in xt_TCPMSS or in xt_tcpudp.c. It seems xt_TCPMSS patch has the smallest possible impact. Signed-off-by: Eric Dumazet Reported-by: Denys Fedoryshchenko Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 872db2d..119e51f 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; - if (len < tcp_hdrlen) + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (len > tcp_hdrlen) return 0; + /* tcph->doff has 4 bits, do not wrap it to 0 */ + if (tcp_hdrlen >= 15 * 4) + return 0; + /* * MSS Option not found ?! add it.. */ -- cgit v0.10.2 From b89bd0c715c148ea3cfef6b250482a77225573b5 Mon Sep 17 00:00:00 2001 From: Eric Leblond Date: Thu, 11 May 2017 18:56:38 +0200 Subject: netfilter: synproxy: fix conntrackd interaction commit 87e94dbc210a720a34be5c1174faee5c84be963e upstream. This patch fixes the creation of connection tracking entry from netlink when synproxy is used. It was missing the addition of the synproxy extension. This was causing kernel crashes when a conntrack entry created by conntrackd was used after the switch of traffic from active node to the passive node. Signed-off-by: Eric Leblond Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2754045..04111c1 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -45,6 +45,8 @@ #include #include #include +#include +#include #ifdef CONFIG_NF_NAT_NEEDED #include #include @@ -1800,6 +1802,8 @@ ctnetlink_create_conntrack(struct net *net, nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); nf_ct_labels_ext_add(ct); + nfct_seqadj_ext_add(ct); + nfct_synproxy_ext_add(ct); /* we must add conntrack extensions before confirmation. */ ct->status |= IPS_CONFIRMED; -- cgit v0.10.2 From 955f270b6f5d7d830188de1f05f055180a8712dc Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Mon, 6 Mar 2017 22:29:14 +0800 Subject: NFSv4: fix a reference leak caused WARNING messages commit 366a1569bff3fe14abfdf9285e31e05e091745f5 upstream. Because nfs4_opendata_access() has close the state when access is denied, so the state isn't leak. Rather than revert the commit a974deee47, I'd like clean the strange state close. [ 1615.094218] ------------[ cut here ]------------ [ 1615.094607] WARNING: CPU: 0 PID: 23702 at lib/list_debug.c:31 __list_add_valid+0x8e/0xa0 [ 1615.094913] list_add double add: new=ffff9d7901d9f608, prev=ffff9d7901d9f608, next=ffff9d7901ee8dd0. 
[ 1615.095458] Modules linked in: nfsv4(E) nfs(E) nfsd(E) tun bridge stp llc fuse ip_set nfnetlink vmw_vsock_vmci_transport vsock f2fs snd_seq_midi snd_seq_midi_event fscrypto coretemp ppdev crct10dif_pclmul crc32_pclmul ghash_clmulni_intel intel_rapl_perf vmw_balloon snd_ens1371 joydev gameport snd_ac97_codec ac97_bus snd_seq snd_pcm snd_rawmidi snd_timer snd_seq_device snd soundcore nfit parport_pc parport acpi_cpufreq tpm_tis tpm_tis_core tpm i2c_piix4 vmw_vmci shpchp auth_rpcgss nfs_acl lockd(E) grace sunrpc(E) xfs libcrc32c vmwgfx drm_kms_helper ttm drm crc32c_intel mptspi e1000 serio_raw scsi_transport_spi mptscsih mptbase ata_generic pata_acpi fjes [last unloaded: nfs] [ 1615.097663] CPU: 0 PID: 23702 Comm: fstest Tainted: G W E 4.11.0-rc1+ #517 [ 1615.098015] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 1615.098807] Call Trace: [ 1615.099183] dump_stack+0x63/0x86 [ 1615.099578] __warn+0xcb/0xf0 [ 1615.099967] warn_slowpath_fmt+0x5f/0x80 [ 1615.100370] __list_add_valid+0x8e/0xa0 [ 1615.100760] nfs4_put_state_owner+0x75/0xc0 [nfsv4] [ 1615.101136] __nfs4_close+0x109/0x140 [nfsv4] [ 1615.101524] nfs4_close_state+0x15/0x20 [nfsv4] [ 1615.101949] nfs4_close_context+0x21/0x30 [nfsv4] [ 1615.102691] __put_nfs_open_context+0xb8/0x110 [nfs] [ 1615.103155] put_nfs_open_context+0x10/0x20 [nfs] [ 1615.103586] nfs4_file_open+0x13b/0x260 [nfsv4] [ 1615.103978] do_dentry_open+0x20a/0x2f0 [ 1615.104369] ? nfs4_copy_file_range+0x30/0x30 [nfsv4] [ 1615.104739] vfs_open+0x4c/0x70 [ 1615.105106] ? may_open+0x5a/0x100 [ 1615.105469] path_openat+0x623/0x1420 [ 1615.105823] do_filp_open+0x91/0x100 [ 1615.106174] ? __alloc_fd+0x3f/0x170 [ 1615.106568] do_sys_open+0x130/0x220 [ 1615.106920] ? __put_cred+0x3d/0x50 [ 1615.107256] SyS_open+0x1e/0x20 [ 1615.107588] entry_SYSCALL_64_fastpath+0x1a/0xa9 [ 1615.107922] RIP: 0033:0x7fab599069b0 [ 1615.108247] RSP: 002b:00007ffcf0600d78 EFLAGS: 00000246 ORIG_RAX: 0000000000000002 [ 1615.108575] RAX: ffffffffffffffda RBX: 00007fab59bcfae0 RCX: 00007fab599069b0 [ 1615.108896] RDX: 0000000000000200 RSI: 0000000000000200 RDI: 00007ffcf060255e [ 1615.109211] RBP: 0000000000040010 R08: 0000000000000000 R09: 0000000000000016 [ 1615.109515] R10: 00000000000006a1 R11: 0000000000000246 R12: 0000000000041000 [ 1615.109806] R13: 0000000000040010 R14: 0000000000001000 R15: 0000000000002710 [ 1615.110152] ---[ end trace 96ed63b1306bf2f3 ]--- Fixes: a974deee47 ("NFSv4: Fix memory and state leak in...") Signed-off-by: Kinglong Mee Signed-off-by: Anna Schumaker Cc: Trond Myklebust Signed-off-by: Greg Kroah-Hartman diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fc9b049..4a64fa0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2343,8 +2343,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) return 0; - /* even though OPEN succeeded, access is denied. Close the file */ - nfs4_close_state(state, fmode); return -EACCES; } -- cgit v0.10.2 From 4ebe28d23d35df2e69542c0146a74d21834ef235 Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Thu, 27 Apr 2017 11:13:38 +0800 Subject: NFSv4.x/callback: Create the callback service through svc_create_pooled commit df807fffaabde625fa9adb82e3e5b88cdaa5709a upstream. As the comments for svc_set_num_threads() said, " Destroying threads relies on the service threads filling in rqstp->rq_task, which only the nfs ones do. Assumes the serv has been created using svc_create_pooled()." 
If creating service through svc_create(), the svc_pool_map_put() will be called in svc_destroy(), but the pool map isn't used. So that, the reference of pool map will be drop, the next using of pool map will get a zero npools. [ 137.992130] divide error: 0000 [#1] SMP [ 137.992148] Modules linked in: nfsd(E) nfsv4 nfs fscache fuse tun bridge stp llc ip_set nfnetlink vmw_vsock_vmci_transport vsock snd_seq_midi snd_seq_midi_event vmw_balloon coretemp crct10dif_pclmul crc32_pclmul ppdev ghash_clmulni_intel intel_rapl_perf joydev snd_ens1371 gameport snd_ac97_codec ac97_bus snd_seq snd_pcm snd_rawmidi snd_timer snd_seq_device snd soundcore parport_pc parport nfit acpi_cpufreq tpm_tis tpm_tis_core tpm vmw_vmci i2c_piix4 shpchp auth_rpcgss nfs_acl lockd(E) grace sunrpc(E) xfs libcrc32c vmwgfx drm_kms_helper ttm crc32c_intel drm e1000 mptspi scsi_transport_spi serio_raw mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd] [ 137.992336] CPU: 0 PID: 4514 Comm: rpc.nfsd Tainted: G E 4.11.0-rc8+ #536 [ 137.992777] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 137.993757] task: ffff955984101d00 task.stack: ffff9873c2604000 [ 137.994231] RIP: 0010:svc_pool_for_cpu+0x2b/0x80 [sunrpc] [ 137.994768] RSP: 0018:ffff9873c2607c18 EFLAGS: 00010246 [ 137.995227] RAX: 0000000000000000 RBX: ffff95598376f000 RCX: 0000000000000002 [ 137.995673] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff9559944aec00 [ 137.996156] RBP: ffff9873c2607c18 R08: ffff9559944aec28 R09: 0000000000000000 [ 137.996609] R10: 0000000001080002 R11: 0000000000000000 R12: ffff95598376f010 [ 137.997063] R13: ffff95598376f018 R14: ffff9559944aec28 R15: ffff9559944aec00 [ 137.997584] FS: 00007f755529eb40(0000) GS:ffff9559bb600000(0000) knlGS:0000000000000000 [ 137.998048] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 137.998548] CR2: 000055f3aecd9660 CR3: 0000000084290000 CR4: 00000000001406f0 [ 137.999052] Call Trace: [ 137.999517] svc_xprt_do_enqueue+0xef/0x260 [sunrpc] [ 138.000028] svc_xprt_received+0x47/0x90 [sunrpc] [ 138.000487] svc_add_new_perm_xprt+0x76/0x90 [sunrpc] [ 138.000981] svc_addsock+0x14b/0x200 [sunrpc] [ 138.001424] ? recalc_sigpending+0x1b/0x50 [ 138.001860] ? __getnstimeofday64+0x41/0xd0 [ 138.002346] ? do_gettimeofday+0x29/0x90 [ 138.002779] write_ports+0x255/0x2c0 [nfsd] [ 138.003202] ? _copy_from_user+0x4e/0x80 [ 138.003676] ? write_recoverydir+0x100/0x100 [nfsd] [ 138.004098] nfsctl_transaction_write+0x48/0x80 [nfsd] [ 138.004544] __vfs_write+0x37/0x160 [ 138.004982] ? selinux_file_permission+0xd7/0x110 [ 138.005401] ? 
security_file_permission+0x3b/0xc0 [ 138.005865] vfs_write+0xb5/0x1a0 [ 138.006267] SyS_write+0x55/0xc0 [ 138.006654] entry_SYSCALL_64_fastpath+0x1a/0xa9 [ 138.007071] RIP: 0033:0x7f7554b9dc30 [ 138.007437] RSP: 002b:00007ffc9f92c788 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 138.007807] RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007f7554b9dc30 [ 138.008168] RDX: 0000000000000002 RSI: 00005640cd536640 RDI: 0000000000000003 [ 138.008573] RBP: 00007ffc9f92c780 R08: 0000000000000001 R09: 0000000000000002 [ 138.008918] R10: 0000000000000064 R11: 0000000000000246 R12: 0000000000000004 [ 138.009254] R13: 00005640cdbf77a0 R14: 00005640cdbf7720 R15: 00007ffc9f92c238 [ 138.009610] Code: 0f 1f 44 00 00 48 8b 87 98 00 00 00 55 48 89 e5 48 83 78 08 00 74 10 8b 05 07 42 02 00 83 f8 01 74 40 83 f8 02 74 19 31 c0 31 d2 b7 88 00 00 00 5d 89 d0 48 c1 e0 07 48 03 87 90 00 00 00 c3 [ 138.010664] RIP: svc_pool_for_cpu+0x2b/0x80 [sunrpc] RSP: ffff9873c2607c18 [ 138.011061] ---[ end trace b3468224cafa7d11 ]--- Signed-off-by: Kinglong Mee Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 484bebc..0a21150 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -279,7 +279,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); + serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); if (!serv) { printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); return ERR_PTR(-ENOMEM); -- cgit v0.10.2 From 8dc9f9dede5b92658a1bb32866e11905933d2b48 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:49 +0200 Subject: xen/blkback: don't use xen_blkif_get() in xen-blkback kthread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a24fa22ce22ae302b3bf8f7008896d52d5d57b8d upstream. There is no need to use xen_blkif_get()/xen_blkif_put() in the kthread of xen-blkback. Thread stopping is synchronous and using the blkif reference counting in the kthread will avoid to ever let the reference count drop to zero at the end of an I/O running concurrent to disconnecting and multiple rings. Setting ring->xenblkd to NULL after stopping the kthread isn't needed as the kthread does this already. 
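As an aside on the pattern this fix relies on: because kthread_stop() does not return until the thread has exited, the structure the worker operates on cannot be freed underneath it, so an extra reference taken inside the thread buys nothing. Below is a minimal userspace sketch of that "stop the worker synchronously, then tear down" idea using pthreads; it is illustrative only, the names are invented, and it is not the xen-blkback code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct ring {
	atomic_bool should_stop;	/* analogue of kthread_should_stop() */
	pthread_t worker;
	int iterations;
};

static void *worker_fn(void *arg)
{
	struct ring *ring = arg;	/* safe: only freed after the join below */

	while (!atomic_load(&ring->should_stop)) {
		ring->iterations++;
		usleep(1000);
	}
	return NULL;
}

static void ring_disconnect(struct ring *ring)
{
	/*
	 * Synchronous stop: once pthread_join() returns, the worker can no
	 * longer touch *ring, so it never needed its own reference on it.
	 */
	atomic_store(&ring->should_stop, true);
	pthread_join(ring->worker, NULL);
	printf("worker stopped after %d iterations\n", ring->iterations);
	free(ring);
}

int main(void)
{
	struct ring *ring = calloc(1, sizeof(*ring));

	if (!ring)
		return 1;
	atomic_init(&ring->should_stop, false);
	if (pthread_create(&ring->worker, NULL, worker_fn, ring)) {
		free(ring);
		return 1;
	}
	usleep(10000);
	ring_disconnect(ring);
	return 0;
}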
Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index c42202d..d6eaaa2 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg) unsigned long timeout; int ret; - xen_blkif_get(blkif); - set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) @@ -665,7 +663,6 @@ purge_gnt_list: print_stats(ring); ring->xenblkd = NULL; - xen_blkif_put(blkif); return 0; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 9b69fe4..d8fc9c5 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); - ring->xenblkd = NULL; } /* The above kthread_stop() guarantees that at this point we -- cgit v0.10.2 From dbc808362b6cb2124f36b14ce354abcc64b6f1bb Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Fri, 17 Feb 2017 14:33:01 +1100 Subject: drm/ast: Handle configuration without P2A bridge commit 71f677a91046599ece96ebab21df956ce909c456 upstream. The ast driver configures a window to enable access into BMC memory space in order to read some configuration registers. If this window is disabled, which it can be from the BMC side, the ast driver can't function. Closing this window is a necessity for security if a machine's host side and BMC side are controlled by different parties; i.e. a cloud provider offering machines "bare metal". A recent patch went in to try to check if that window is open but it does so by trying to access the registers in question and testing if the result is 0xffffffff. This method will trigger a PCIe error when the window is closed which on some systems will be fatal (it will trigger an EEH for example on POWER which will take out the device). This patch improves this in two ways: - First, if the firmware has put properties in the device-tree containing the relevant configuration information, we use these. - Otherwise, a bit in one of the SCU scratch registers (which are readable via the VGA register space and writeable by the BMC) will indicate if the BMC has closed the window. This bit has been defined by Y.C Chen from Aspeed. If the window is closed and the configuration isn't available from the device-tree, some sane defaults are used. Those defaults are hopefully sufficient for standard video modes used on a server. 
Signed-off-by: Russell Currey Acked-by: Joel Stanley Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Dave Airlie Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 7abda94..3bedcf7 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,7 +113,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; - bool DisableP2A; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 533e762..fb99762 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); @@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Check P2A Access */ - ast->DisableP2A = true; - data = ast_read32(ast, 0xf004); - if (data != 0xFFFFFFFF) - ast->DisableP2A = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->DisableP2A == false) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; - } + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; } break; } @@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - if (ast->DisableP2A) - { + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + 
mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: ast->dram_bus_width = 16; ast->dram_type = AST_DRAM_1Gx16; ast->mclk = 396; + return 0; } - else - { - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) - ast->dram_bus_width = 16; - else - ast->dram_bus_width = 32; - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; break; - case 2: + default: case 1: - div = 0x2; + ast->dram_type = AST_DRAM_1Gx16; break; - default: - div = 0x1; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; break; } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 270e8fb..c7c58be 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev) ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->DisableP2A == false) - { + if (ast->config_mode == ast_use_p2a) { if (ast->chip == AST2300 || ast->chip == AST2400) ast_init_dram_2300(dev); else ast_init_dram_reg(dev); ast_init_3rdtx(dev); - } - else - { + } else { if (ast->tx_chip_type != AST_TX_NONE) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } -- cgit v0.10.2 From b1355226a64e6301ca63aee1e78728887e3527f1 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 7 Apr 2017 16:05:00 -0700 Subject: mm, swap_cgroup: reschedule when neeed in swap_cgroup_swapoff() commit 460bcec84e11c75122ace5976214abbc596eb91b upstream. We got need_resched() warnings in swap_cgroup_swapoff() because swap_cgroup_ctrl[type].length is particularly large. Reschedule when needed. 
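The general pattern here is to yield periodically inside a loop whose length is dictated by the size of the swap area rather than by the kernel itself. Below is a hedged userspace analogue in which sched_yield() stands in for cond_resched(); the CHUNK constant merely plays the role SWAP_CLUSTER_MAX plays in the patch.

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK 32	/* analogue of SWAP_CLUSTER_MAX above */

int main(void)
{
	unsigned long length = 1UL << 20;
	char *map = calloc(length, 1);

	if (!map)
		return 1;
	for (unsigned long i = 0; i < length; i++) {
		map[i] = 0;		/* stands in for __free_page(page) */
		if (!(i % CHUNK))
			sched_yield();	/* give other tasks a chance to run */
	}
	free(map);
	printf("released %lu entries\n", length);
	return 0;
}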
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1704061315270.80559@chino.kir.corp.google.com Signed-off-by: David Rientjes Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Vladimir Davydov Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 454d6d7..3405b4e 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -204,6 +204,8 @@ void swap_cgroup_swapoff(int type) struct page *page = map[i]; if (page) __free_page(page); + if (!(i % SWAP_CLUSTER_MAX)) + cond_resched(); } vfree(map); } -- cgit v0.10.2 From 3d4ac49a9538c36d9c3c121ddbcb4c3958dee5e9 Mon Sep 17 00:00:00 2001 From: Karl Beldan Date: Tue, 27 Jun 2017 19:22:16 +0000 Subject: MIPS: head: Reorder instructions missing a delay slot commit 25d8b92e0af75d72ce8b99e63e5a449cc0888efa upstream. In this sequence the 'move' is assumed in the delay slot of the 'beq', but head.S is in reorder mode and the former gets pushed one 'nop' farther by the assembler. The corrected behavior made booting with an UHI supplied dtb erratic. Fixes: 15f37e158892 ("MIPS: store the appended dtb address in a variable") Signed-off-by: Karl Beldan Reviewed-by: James Hogan Cc: Jonas Gorski Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/16614/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index cf05220..d1bb506 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point beq t0, t1, dtb_found #endif li t1, -2 - beq a0, t1, dtb_found move t2, a1 + beq a0, t1, dtb_found li t2, 0 dtb_found: -- cgit v0.10.2 From f7d3d40ea1242f633bbf093b63181acff2da319e Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 29 Jun 2017 15:05:04 +0100 Subject: MIPS: Avoid accidental raw backtrace commit 854236363370995a609a10b03e35fd3dc5e9e4a1 upstream. Since commit 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with usermode") show_backtrace() invokes the raw backtracer when cp0_status & ST0_KSU indicates user mode to fix issues on EVA kernels where user and kernel address spaces overlap. However this is used by show_stack() which creates its own pt_regs on the stack and leaves cp0_status uninitialised in most of the code paths. This results in the non deterministic use of the raw back tracer depending on the previous stack content. show_stack() deals exclusively with kernel mode stacks anyway, so explicitly initialise regs.cp0_status to KSU_KERNEL (i.e. 0) to ensure we get a useful backtrace. 
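The underlying rule is that a struct built on the stack must have every field its consumer reads set explicitly, otherwise behaviour depends on whatever happened to be on the stack. A small, non-MIPS illustration of that idea; the struct and field names are invented for the example.

#include <stdio.h>
#include <string.h>

struct fake_regs {
	unsigned long sp;
	unsigned long ra;
	unsigned int status;		/* analogue of cp0_status */
};

static void show_backtrace(const struct fake_regs *regs)
{
	/* The consumer branches on 'status'; it must never be stack garbage. */
	if (regs->status & 0x10)
		printf("user-mode path\n");
	else
		printf("kernel-mode path\n");
}

int main(void)
{
	struct fake_regs regs;

	memset(&regs, 0, sizeof(regs));	/* or: struct fake_regs regs = { 0 }; */
	regs.sp = 0xdeadbeef;
	regs.status = 0;		/* analogue of KSU_KERNEL */
	show_backtrace(&regs);
	return 0;
}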
Fixes: 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with usermode") Signed-off-by: James Hogan Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16656/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ec87ef9..b0b29cb 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -199,6 +199,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); + + regs.cp0_status = KSU_KERNEL; if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; -- cgit v0.10.2 From e9e24faf823e58713115974ab50102319c33a34d Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 2 Mar 2017 14:02:40 -0800 Subject: MIPS: pm-cps: Drop manual cache-line alignment of ready_count commit 161c51ccb7a6faf45ffe09aa5cf1ad85ccdad503 upstream. We allocate memory for a ready_count variable per-CPU, which is accessed via a cached non-coherent TLB mapping to perform synchronisation between threads within the core using LL/SC instructions. In order to ensure that the variable is contained within its own data cache line we allocate 2 lines worth of memory & align the resulting pointer to a line boundary. This is however unnecessary, since kmalloc is guaranteed to return memory which is at least cache-line aligned (see ARCH_DMA_MINALIGN). Stop the redundant manual alignment. Besides cleaning up the code & avoiding needless work, this has the side effect of avoiding an arithmetic error found by Bryan on 64 bit systems due to the 32 bit size of the former dlinesz. This led the ready_count variable to have its upper 32b cleared erroneously for MIPS64 kernels, causing problems when ready_count was later used on MIPS64 via cpuidle. Signed-off-by: Paul Burton Fixes: 3179d37ee1ed ("MIPS: pm-cps: add PM state entry code for CPS systems") Reported-by: Bryan O'Donoghue Reviewed-by: Bryan O'Donoghue Tested-by: Bryan O'Donoghue Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15383/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 7cf653e..60c4d45 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); * state. Actually per-core rather than per-CPU. 
*/ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); @@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu) { enum cps_pm_state state; unsigned core = cpu_data[cpu].core; - unsigned dlinesz = cpu_data[cpu].dcache.linesz; void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu) } if (!per_cpu(ready_count, core)) { - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count_alloc, core) = core_rc; - - /* Ensure ready_count is aligned to a cacheline boundary */ - core_rc += dlinesz - 1; - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); per_cpu(ready_count, core) = core_rc; } -- cgit v0.10.2 From dad3135e762bdb66318fe5ab902db5c4fbb1ad2f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 3 Mar 2017 15:26:05 -0800 Subject: MIPS: Fix IRQ tracing & lockdep when rescheduling commit d8550860d910c6b7b70f830f59003b33daaa52c9 upstream. When the scheduler sets TIF_NEED_RESCHED & we call into the scheduler from arch/mips/kernel/entry.S we disable interrupts. This is true regardless of whether we reach work_resched from syscall_exit_work, resume_userspace or by looping after calling schedule(). Although we disable interrupts in these paths we don't call trace_hardirqs_off() before calling into C code which may acquire locks, and we therefore leave lockdep with an inconsistent view of whether interrupts are disabled or not when CONFIG_PROVE_LOCKING & CONFIG_DEBUG_LOCKDEP are both enabled. Without tracing this interrupt state lockdep will print warnings such as the following once a task returns from a syscall via syscall_exit_partial with TIF_NEED_RESCHED set: [ 49.927678] ------------[ cut here ]------------ [ 49.934445] WARNING: CPU: 0 PID: 1 at kernel/locking/lockdep.c:3687 check_flags.part.41+0x1dc/0x1e8 [ 49.946031] DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled) [ 49.946355] CPU: 0 PID: 1 Comm: init Not tainted 4.10.0-00439-gc9fd5d362289-dirty #197 [ 49.963505] Stack : 0000000000000000 ffffffff81bb5d6a 0000000000000006 ffffffff801ce9c4 [ 49.974431] 0000000000000000 0000000000000000 0000000000000000 000000000000004a [ 49.985300] ffffffff80b7e487 ffffffff80a24498 a8000000ff160000 ffffffff80ede8b8 [ 49.996194] 0000000000000001 0000000000000000 0000000000000000 0000000077c8030c [ 50.007063] 000000007fd8a510 ffffffff801cd45c 0000000000000000 a8000000ff127c88 [ 50.017945] 0000000000000000 ffffffff801cf928 0000000000000001 ffffffff80a24498 [ 50.028827] 0000000000000000 0000000000000001 0000000000000000 0000000000000000 [ 50.039688] 0000000000000000 a8000000ff127bd0 0000000000000000 ffffffff805509bc [ 50.050575] 00000000140084e0 0000000000000000 0000000000000000 0000000000040a00 [ 50.061448] 0000000000000000 ffffffff8010e1b0 0000000000000000 ffffffff805509bc [ 50.072327] ... 
[ 50.076087] Call Trace: [ 50.079869] [] show_stack+0x80/0xa8 [ 50.086577] [] dump_stack+0x10c/0x190 [ 50.093498] [] __warn+0xf0/0x108 [ 50.099889] [] warn_slowpath_fmt+0x3c/0x48 [ 50.107241] [] check_flags.part.41+0x1dc/0x1e8 [ 50.114961] [] lock_is_held_type+0x8c/0xb0 [ 50.122291] [] __schedule+0x8c0/0x10f8 [ 50.129221] [] schedule+0x30/0x98 [ 50.135659] [] work_resched+0x8/0x34 [ 50.142397] ---[ end trace 0cb4f6ef5b99fe21 ]--- [ 50.148405] possible reason: unannotated irqs-off. [ 50.154600] irq event stamp: 400463 [ 50.159566] hardirqs last enabled at (400463): [] _raw_spin_unlock_irqrestore+0x40/0xa8 [ 50.171981] hardirqs last disabled at (400462): [] _raw_spin_lock_irqsave+0x30/0xb0 [ 50.183897] softirqs last enabled at (400450): [] __do_softirq+0x4ac/0x6a8 [ 50.195015] softirqs last disabled at (400425): [] irq_exit+0x110/0x128 Fix this by using the TRACE_IRQS_OFF macro to call trace_hardirqs_off() when CONFIG_TRACE_IRQFLAGS is enabled. This is done before invoking schedule() following the work_resched label because: 1) Interrupts are disabled regardless of the path we take to reach work_resched() & schedule(). 2) Performing the tracing here avoids the need to do it in paths which disable interrupts but don't call out to C code before hitting a path which uses the RESTORE_SOME macro that will call trace_hardirqs_on() or trace_hardirqs_off() as appropriate. We call trace_hardirqs_on() using the TRACE_IRQS_ON macro before calling syscall_trace_leave() for similar reasons, ensuring that lockdep has a consistent view of state after we re-enable interrupts. Signed-off-by: Paul Burton Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15385/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 7791840..db07793 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: + TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and @@ -173,6 +175,7 @@ syscall_exit_work: beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead + TRACE_IRQS_ON move a0, sp jal syscall_trace_leave b resume_userspace -- cgit v0.10.2 From 093750c3dec46a1d440098341e531e3e7c17a96d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 28 Jun 2017 12:02:02 +0200 Subject: ALSA: hda - Fix endless loop of codec configure commit d94815f917da770d42c377786dc428f542e38f71 upstream. azx_codec_configure() loops over the codecs found on the given controller via a linked list. The code used to work in the past, but in the current version, this may lead to an endless loop when a codec binding returns an error. The culprit is that the snd_hda_codec_configure() unregisters the device upon error, and this eventually deletes the given codec object from the bus. Since the list is initialized via list_del_init(), the next object points to the same device itself. This behavior change was introduced at splitting the HD-audio code code, and forgotten to adapt it here. For fixing this bug, just use a *_safe() version of list iteration. 
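The generic idiom behind the fix: when the loop body may free or unlink the current element, the iterator has to fetch the next pointer before running the body. A self-contained sketch with a plain singly linked list (not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;

	for (int i = 3; i >= 1; i--) {		/* build the list 1 -> 2 -> 3 */
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	/* "Safe" iteration: fetch 'next' before the body may free 'cur'. */
	struct node *next;

	for (struct node *cur = head; cur; cur = next) {
		next = cur->next;	/* with 'cur = cur->next' in the loop
					 * header instead, freeing cur in the
					 * body would be a use-after-free, and a
					 * node left pointing at itself would
					 * loop forever */
		printf("configuring node %d\n", cur->id);
		free(cur);		/* the body may delete the current entry */
	}
	return 0;
}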
Fixes: d068ebc25e6e ("ALSA: hda - Move some codes up to hdac_bus struct") Reported-by: Daniel Vetter Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 373fcad..776dffa 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -294,6 +294,8 @@ struct hda_codec { #define list_for_each_codec(c, bus) \ list_for_each_entry(c, &(bus)->core.codec_list, core.list) +#define list_for_each_codec_safe(c, n, bus) \ + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) /* snd_hda_codec_read/write optional flags */ #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 5008785..0af1132 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1333,8 +1333,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs); /* configure each codec instance */ int azx_codec_configure(struct azx *chip) { - struct hda_codec *codec; - list_for_each_codec(codec, &chip->bus) { + struct hda_codec *codec, *next; + + /* use _safe version here since snd_hda_codec_configure() deregisters + * the device upon error and deletes itself from the bus list. + */ + list_for_each_codec_safe(codec, next, &chip->bus) { snd_hda_codec_configure(codec); } return 0; -- cgit v0.10.2 From 7d0e27fe24c55dda16ad579db5a0234b3ff97770 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Wed, 28 Jun 2017 08:59:16 +0800 Subject: ALSA: hda - set input_path bitmap to zero after moving it to new place commit a8f20fd25bdce81a8e41767c39f456d346b63427 upstream. Recently we met a problem, the codec has valid adcs and input pins, and they can form valid input paths, but the driver does not build valid controls for them like "Mic boost", "Capture Volume" and "Capture Switch". Through debugging, I found the driver needs to shrink the invalid adcs and input paths for this machine, so it will move the whole column bitmap value to the previous column, after moving it, the driver forgets to set the original column bitmap value to zero, as a result, the driver will invalidate the path whose index value is the original colume bitmap value. After executing this function, all valid input paths are invalidated by a mistake, there are no any valid input paths, so the driver won't build controls for them. Fixes: 3a65bcdc577a ("ALSA: hda - Fix inconsistent input_paths after ADC reduction") Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index e7c8f4f..b0bd290 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3169,6 +3169,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec) spec->input_paths[i][nums]); spec->input_paths[i][nums] = spec->input_paths[i][n]; + spec->input_paths[i][n] = 0; } } nums++; -- cgit v0.10.2 From cb2c6fdf620f4802c31d6577ff34391fdd949cc6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 27 Jun 2017 17:33:38 -0400 Subject: NFSv4.1: Fix a race in nfs4_proc_layoutget commit bd171930e6a3de4f5cffdafbb944e50093dfb59b upstream. If the task calling layoutget is signalled, then it is possible for the calls to nfs4_sequence_free_slot() and nfs4_layoutget_prepare() to race, in which case we leak a slot. The fix is to move the call to nfs4_sequence_free_slot() into the nfs4_layoutget_release() so that it gets called at task teardown time. 
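The pattern used here is to funnel the freeing of a per-task resource into the task's single release callback, so it runs exactly once at teardown instead of in two paths that can race. A rough userspace sketch of that idea, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct task {
	int *slot;			/* per-task resource */
	void (*release)(struct task *);	/* runs exactly once at teardown */
};

static void task_release(struct task *t)
{
	free(t->slot);			/* the only place the slot is freed */
	t->slot = NULL;
}

static void task_put(struct task *t)
{
	t->release(t);			/* teardown time, signalled or not */
	free(t);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->slot = malloc(sizeof(*t->slot));
	if (!t->slot) {
		free(t);
		return 1;
	}
	*t->slot = 42;
	t->release = task_release;

	/*
	 * Both the normal completion path and the signalled/error path end
	 * with task_put(); neither frees the slot directly, so the free can
	 * neither race nor run twice.
	 */
	task_put(t);
	return 0;
}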
Fixes: 2e80dbe7ac51 ("NFSv4.1: Close callback races for OPEN, LAYOUTGET...") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4a64fa0..401ea6e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8429,6 +8429,7 @@ static void nfs4_layoutget_release(void *calldata) size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); + nfs4_sequence_free_slot(&lgp->res.seq_res); nfs4_free_pages(lgp->args.layout.pages, max_pages); pnfs_put_layout_hdr(NFS_I(inode)->layout); put_nfs_open_context(lgp->args.ctx); @@ -8503,7 +8504,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ if (status == 0 && lgp->res.layoutp->len) lseg = pnfs_layout_process(lgp); - nfs4_sequence_free_slot(&lgp->res.seq_res); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); if (status) -- cgit v0.10.2 From 78c4244f8bdbf3cefa1e01bfcfe7a53bcc45c0f3 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 23 Jun 2017 13:45:16 +0200 Subject: gpiolib: fix filtering out unwanted events commit ad537b822577fcc143325786cd6ad50d7b9df31c upstream. GPIOEVENT_REQUEST_BOTH_EDGES is not a single flag, but a binary OR of GPIOEVENT_REQUEST_RISING_EDGE and GPIOEVENT_REQUEST_FALLING_EDGE. The expression 'le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES' we'll get evaluated to true even if only one event type was requested. Fix it by checking both RISING & FALLING flags explicitly. Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events") Signed-off-by: Bartosz Golaszewski Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9215931..f2bb512 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -707,7 +707,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) ge.timestamp = ktime_get_real_ns(); - if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { int level = gpiod_get_value_cansleep(le->desc); if (level) -- cgit v0.10.2 From 466877f2d25758e5ad007b292c1f225520f8c877 Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Mon, 26 Jun 2017 14:39:08 +0200 Subject: drm/vmwgfx: Free hash table allocated by cmdbuf managed res mgr commit 82fcee526ba8ca2c5d378bdf51b21b7eb058fe3a upstream. The hash table created during vmw_cmdbuf_res_man_create was never freed. This causes memory leak in context creation. Added the corresponding drm_ht_remove in vmw_cmdbuf_res_man_destroy. Tested for memory leak by running piglit overnight and kernel memory is not inflated which earlier was. 
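The fix is an instance of keeping create and destroy symmetric: every resource the create path sets up must be torn down in destroy. A minimal sketch of that pairing, where a plain malloc'd array stands in for the DRM open hash and the function names are invented:

#include <stdio.h>
#include <stdlib.h>

struct res_manager {
	void **table;		/* stands in for the hash table in the patch */
	size_t order;
};

static struct res_manager *res_man_create(size_t order)
{
	struct res_manager *man = calloc(1, sizeof(*man));

	if (!man)
		return NULL;
	man->table = calloc((size_t)1 << order, sizeof(*man->table));
	if (!man->table) {
		free(man);
		return NULL;
	}
	man->order = order;
	return man;
}

static void res_man_destroy(struct res_manager *man)
{
	/* ... free the entries on the list first, as the patch does ... */
	free(man->table);	/* the step that was missing: drop the table */
	free(man);
}

int main(void)
{
	struct res_manager *man = res_man_create(12);

	if (!man)
		return 1;
	res_man_destroy(man);
	puts("no leak: destroy mirrors create");
	return 0;
}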
Signed-off-by: Deepak Rawat Reviewed-by: Sinclair Yeh Signed-off-by: Thomas Hellstrom Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2..1f013d4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } -- cgit v0.10.2 From 1c0fa383b3391f1e5528b264bd9b3ca9209054cf Mon Sep 17 00:00:00 2001 From: Vallish Vaidyeshwara Date: Fri, 23 Jun 2017 18:53:06 +0000 Subject: dm thin: do not queue freed thin mapping for next stage processing commit 00a0ea33b495ee6149bf5a77ac5807ce87323abb upstream. process_prepared_discard_passdown_pt1() should cleanup dm_thin_new_mapping in cases of error. dm_pool_inc_data_range() can fail trying to get a block reference: metadata operation 'dm_pool_inc_data_range' failed: error = -61 When dm_pool_inc_data_range() fails, dm thin aborts current metadata transaction and marks pool as PM_READ_ONLY. Memory for thin mapping is released as well. However, current thin mapping will be queued onto next stage as part of queue_passdown_pt2() or passdown_endio(). This dangling thin mapping memory when processed and accessed in next stage will lead to device mapper crashing. Code flow without fix: -> process_prepared_discard_passdown_pt1(m) -> dm_thin_remove_range() -> discard passdown --> passdown_endio(m) queues m onto next stage -> dm_pool_inc_data_range() fails, frees memory m but does not remove it from next stage queue -> process_prepared_discard_passdown_pt2(m) -> processes freed memory m and crashes One such stack: Call Trace: [] dm_cell_release_no_holder+0x2f/0x70 [dm_bio_prison] [] cell_defer_no_holder+0x3c/0x80 [dm_thin_pool] [] process_prepared_discard_passdown_pt2+0x4b/0x90 [dm_thin_pool] [] process_prepared+0x81/0xa0 [dm_thin_pool] [] do_worker+0xc5/0x820 [dm_thin_pool] [] ? __schedule+0x244/0x680 [] ? pwq_activate_delayed_work+0x42/0xb0 [] process_one_work+0x153/0x3f0 [] worker_thread+0x12b/0x4b0 [] ? rescuer_thread+0x350/0x350 [] kthread+0xca/0xe0 [] ? kthread_park+0x60/0x60 [] ret_from_fork+0x25/0x30 The fix is to first take the block ref count for discarded block and then do a passdown discard of this block. If block ref count fails, then bail out aborting current metadata transaction, mark pool as PM_READ_ONLY and also free current thin mapping memory (existing error handling code) without queueing this thin mapping onto next stage of processing. If block ref count succeeds, then passdown discard of this block. Discard callback of passdown_endio() will queue this thin mapping onto next stage of processing. 
Code flow with fix: -> process_prepared_discard_passdown_pt1(m) -> dm_thin_remove_range() -> dm_pool_inc_data_range() --> if fails, free memory m and bail out -> discard passdown --> passdown_endio(m) queues m onto next stage Reviewed-by: Eduardo Valentin Reviewed-by: Cristian Gafton Reviewed-by: Anchal Agarwal Signed-off-by: Vallish Vaidyeshwara Reviewed-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index be869a9..0b678b5 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1095,6 +1095,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } + /* + * Increment the unmapped blocks. This prevents a race between the + * passdown io and reallocation of freed blocks. + */ + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); + if (r) { + metadata_operation_failed(pool, "dm_pool_inc_data_range", r); + bio_io_error(m->bio); + cell_defer_no_holder(tc, m->cell); + mempool_free(m, pool->mapping_pool); + return; + } + discard_parent = bio_alloc(GFP_NOIO, 1); if (!discard_parent) { DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", @@ -1115,19 +1128,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) end_discard(&op, r); } } - - /* - * Increment the unmapped blocks. This prevents a race between the - * passdown io and reallocation of freed blocks. - */ - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); - if (r) { - metadata_operation_failed(pool, "dm_pool_inc_data_range", r); - bio_io_error(m->bio); - cell_defer_no_holder(tc, m->cell); - mempool_free(m, pool->mapping_pool); - return; - } } static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m) -- cgit v0.10.2 From ed96148d7f8e900b61d90de79bf3b273887ffa70 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Thu, 4 May 2017 10:25:47 +0800 Subject: x86/mm: Fix boot crash caused by incorrect loop count calculation in sync_global_pgds() commit fc5f9d5f151c9fff21d3d1d2907b888a5aec3ff7 upstream. Jeff Moyer reported that on his system with two memory regions 0~64G and 1T~1T+192G, and kernel option "memmap=192G!1024G" added, enabling KASLR will make the system hang intermittently during boot. While adding 'nokaslr' won't. The back trace is: Oops: 0000 [#1] SMP RIP: memcpy_erms() [ .... ] Call Trace: pmem_rw_page() bdev_read_page() do_mpage_readpage() mpage_readpages() blkdev_readpages() __do_page_cache_readahead() force_page_cache_readahead() page_cache_sync_readahead() generic_file_read_iter() blkdev_read_iter() __vfs_read() vfs_read() SyS_read() entry_SYSCALL_64_fastpath() This crash happens because the for loop count calculation in sync_global_pgds() is not correct. When a mapping area crosses PGD entries, we should calculate the starting address of region which next PGD covers and assign it to next for loop count, but not add PGDIR_SIZE directly. The old code works right only if the mapping area is an exact multiple of PGDIR_SIZE, otherwize the end region could be skipped so that it can't be synchronized to all other processes from kernel PGD init_mm.pgd. In Jeff's system, emulated pmem area [1024G, 1216G) is smaller than PGDIR_SIZE. While 'nokaslr' works because PAGE_OFFSET is 1T aligned, it makes this area be mapped inside one PGD entry. With KASLR enabled, this area could cross two PGD entries, then the next PGD entry won't be synced to all other processes. That is why we saw empty PGD. Fix it. 
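To see why the stepping matters, compare the two loop-advance strategies on a range that is not a multiple of the stride: adding the stride from an unaligned start can overshoot the final entry, while aligning up to the next boundary always lands on it. A small runnable comparison (the 4 KiB stride is only illustrative, not PGDIR_SIZE):

#include <stdio.h>

#define STRIDE		0x1000UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static void walk(const char *name, unsigned long start, unsigned long end,
		 int aligned_step)
{
	printf("%s:", name);
	for (unsigned long addr = start; addr <= end;
	     addr = aligned_step ? ALIGN_UP(addr + 1, STRIDE) : addr + STRIDE)
		printf(" entry@%#lx", addr & ~(STRIDE - 1));
	printf("\n");
}

int main(void)
{
	/* Range starts mid-entry and ends just inside the following entry. */
	unsigned long start = 0x1800, end = 0x2100;

	walk("addr += STRIDE (buggy) ", start, end, 0);	/* visits 0x1000 only */
	walk("align to next boundary", start, end, 1);	/* visits 0x1000, 0x2000 */
	return 0;
}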
Reported-by: Jeff Moyer Signed-off-by: Baoquan He Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Hansen Cc: Dave Young Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jinbum Park Cc: Josh Poimboeuf Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Garnier Cc: Thomas Gleixner Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Link: http://lkml.kernel.org/r/1493864747-8506-1-git-send-email-bhe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 14b9dd7..9a324fc 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup); */ void sync_global_pgds(unsigned long start, unsigned long end, int removed) { - unsigned long address; + unsigned long addr; - for (address = start; address <= end; address += PGDIR_SIZE) { - const pgd_t *pgd_ref = pgd_offset_k(address); + for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { + const pgd_t *pgd_ref = pgd_offset_k(addr); struct page *page; /* @@ -113,7 +113,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) pgd_t *pgd; spinlock_t *pgt_lock; - pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgd = (pgd_t *)page_address(page) + pgd_index(addr); /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); -- cgit v0.10.2 From d2da8d394147526a28c6d5bb83a72635e7f0a288 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 8 Dec 2016 19:55:22 +0800 Subject: usb: gadget: f_fs: Fix possible deadlock commit b3ce3ce02d146841af012d08506b4071db8ffde3 upstream. When the system tries to close /dev/usb-ffs/adb/ep0 on one core while another core tries to attach a new UDC at the same time, the deadlock shown in the scenario below occurs. Thus we should release the ffs lock before issuing unregister_gadget_item(). [ 52.642225] c1 ====================================================== [ 52.642228] c1 [ INFO: possible circular locking dependency detected ] [ 52.642236] c1 4.4.6+ #1 Tainted: G W O [ 52.642241] c1 ------------------------------------------------------- [ 52.642245] c1 usb ffs open/2808 is trying to acquire lock: [ 52.642270] c0 (udc_lock){+.+.+.}, at: [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642272] c1 but task is already holding lock: [ 52.642283] c0 (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642285] c1 which lock already depends on the new lock.
[ 52.642287] c1 the existing dependency chain (in reverse order) is: [ 52.642295] c0 -> #1 (ffs_lock){+.+.+.}: [ 52.642307] c0 [] __lock_acquire+0x20f0/0x2238 [ 52.642314] c0 [] lock_acquire+0xe4/0x298 [ 52.642322] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642328] c0 [] ffs_func_bind+0x504/0x6e8 [ 52.642334] c0 [] usb_add_function+0x84/0x184 [ 52.642340] c0 [] configfs_composite_bind+0x264/0x39c [ 52.642346] c0 [] udc_bind_to_driver+0x58/0x11c [ 52.642352] c0 [] usb_udc_attach_driver+0x90/0xc8 [ 52.642358] c0 [] gadget_dev_desc_UDC_store+0xd4/0x128 [ 52.642369] c0 [] configfs_write_file+0xd0/0x13c [ 52.642376] c0 [] vfs_write+0xb8/0x214 [ 52.642381] c0 [] SyS_write+0x54/0xb0 [ 52.642388] c0 [] el0_svc_naked+0x24/0x28 [ 52.642395] c0 -> #0 (udc_lock){+.+.+.}: [ 52.642401] c0 [] print_circular_bug+0x84/0x2e4 [ 52.642407] c0 [] __lock_acquire+0x2138/0x2238 [ 52.642412] c0 [] lock_acquire+0xe4/0x298 [ 52.642420] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642427] c0 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642432] c0 [] unregister_gadget_item+0x28/0x44 [ 52.642439] c0 [] ffs_data_clear+0x138/0x140 [ 52.642444] c0 [] ffs_data_reset+0x20/0x6c [ 52.642450] c0 [] ffs_data_closed+0xac/0x12c [ 52.642454] c0 [] ffs_ep0_release+0x20/0x2c [ 52.642460] c0 [] __fput+0xb0/0x1f4 [ 52.642466] c0 [] ____fput+0x20/0x2c [ 52.642473] c0 [] task_work_run+0xb4/0xe8 [ 52.642482] c0 [] do_exit+0x360/0xb9c [ 52.642487] c0 [] do_group_exit+0x4c/0xb0 [ 52.642494] c0 [] get_signal+0x380/0x89c [ 52.642501] c0 [] do_signal+0x154/0x518 [ 52.642507] c0 [] do_notify_resume+0x70/0x78 [ 52.642512] c0 [] work_pending+0x1c/0x20 [ 52.642514] c1 other info that might help us debug this: [ 52.642517] c1 Possible unsafe locking scenario: [ 52.642518] c1 CPU0 CPU1 [ 52.642520] c1 ---- ---- [ 52.642525] c0 lock(ffs_lock); [ 52.642529] c0 lock(udc_lock); [ 52.642533] c0 lock(ffs_lock); [ 52.642537] c0 lock(udc_lock); [ 52.642539] c1 *** DEADLOCK *** [ 52.642543] c1 1 lock held by usb ffs open/2808: [ 52.642555] c0 #0: (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642557] c1 stack backtrace: [ 52.642563] c1 CPU: 1 PID: 2808 Comm: usb ffs open Tainted: G [ 52.642565] c1 Hardware name: Spreadtrum SP9860g Board (DT) [ 52.642568] c1 Call trace: [ 52.642573] c1 [] dump_backtrace+0x0/0x170 [ 52.642577] c1 [] show_stack+0x20/0x28 [ 52.642583] c1 [] dump_stack+0xa8/0xe0 [ 52.642587] c1 [] print_circular_bug+0x1fc/0x2e4 [ 52.642591] c1 [] __lock_acquire+0x2138/0x2238 [ 52.642595] c1 [] lock_acquire+0xe4/0x298 [ 52.642599] c1 [] mutex_lock_nested+0x7c/0x3cc [ 52.642604] c1 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642608] c1 [] unregister_gadget_item+0x28/0x44 [ 52.642613] c1 [] ffs_data_clear+0x138/0x140 [ 52.642618] c1 [] ffs_data_reset+0x20/0x6c [ 52.642621] c1 [] ffs_data_closed+0xac/0x12c [ 52.642625] c1 [] ffs_ep0_release+0x20/0x2c [ 52.642629] c1 [] __fput+0xb0/0x1f4 [ 52.642633] c1 [] ____fput+0x20/0x2c [ 52.642636] c1 [] task_work_run+0xb4/0xe8 [ 52.642640] c1 [] do_exit+0x360/0xb9c [ 52.642644] c1 [] do_group_exit+0x4c/0xb0 [ 52.642647] c1 [] get_signal+0x380/0x89c [ 52.642651] c1 [] do_signal+0x154/0x518 [ 52.642656] c1 [] do_notify_resume+0x70/0x78 [ 52.642659] c1 [] work_pending+0x1c/0x20 Acked-by: Michal Nazarewicz Signed-off-by: Baolin Wang Signed-off-by: Felipe Balbi Cc: Jerry Zhang Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 04ffd76..f9c9980 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ 
-3688,6 +3688,7 @@ static void ffs_closed(struct ffs_data *ffs) { struct ffs_dev *ffs_obj; struct f_fs_opts *opts; + struct config_item *ci; ENTER(); ffs_dev_lock(); @@ -3711,8 +3712,11 @@ static void ffs_closed(struct ffs_data *ffs) || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) goto done; - unregister_gadget_item(ffs_obj->opts-> - func_inst.group.cg_item.ci_parent->ci_parent); + ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; + ffs_dev_unlock(); + + unregister_gadget_item(ci); + return; done: ffs_dev_unlock(); } -- cgit v0.10.2 From 6539c4f991c28e82d1eb0385d35cc9662985f61a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:25 +0200 Subject: l2tp: fix race in l2tp_recv_common() commit 61b9a047729bb230978178bca6729689d0c50ca2 upstream. Taking a reference on sessions in l2tp_recv_common() is racy; this has to be done by the callers. To this end, a new function is required (l2tp_session_get()) to atomically lookup a session and take a reference on it. Callers then have to manually drop this reference. Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e702cb9..046a5ba 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -278,6 +278,55 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn } EXPORT_SYMBOL_GPL(l2tp_session_find); +/* Like l2tp_session_find() but takes a reference on the returned session. + * Optionally calls session->ref() too if do_ref is true. + */ +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref) +{ + struct hlist_head *session_list; + struct l2tp_session *session; + + if (!tunnel) { + struct l2tp_net *pn = l2tp_pernet(net); + + session_list = l2tp_session_id_hash_2(pn, session_id); + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, global_hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + rcu_read_unlock_bh(); + + return session; + } + } + rcu_read_unlock_bh(); + + return NULL; + } + + session_list = l2tp_session_id_hash(tunnel, session_id); + read_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session, session_list, hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + + return session; + } + } + read_unlock_bh(&tunnel->hlist_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_session_get); + struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, bool do_ref) { @@ -637,6 +686,9 @@ discard: * a data (not control) frame before coming here. Fields up to the * session-id have already been parsed and ptr points to the data * after the session-id. + * + * session->ref() must have been called prior to l2tp_recv_common(). + * session->deref() will be called automatically after skb is processed. */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, @@ -646,14 +698,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int offset; u32 ns, nr; - /* The ref count is increased since we now hold a pointer to - * the session. 
Take care to decrement the refcnt when exiting - * this function from now on... - */ - l2tp_session_inc_refcount(session); - if (session->ref) - (*session->ref)(session); - /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { @@ -806,8 +850,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Try to dequeue as many skbs from reorder_q as we can. */ l2tp_recv_dequeue(session); - l2tp_session_dec_refcount(session); - return; discard: @@ -816,8 +858,6 @@ discard: if (session->deref) (*session->deref)(session); - - l2tp_session_dec_refcount(session); } EXPORT_SYMBOL(l2tp_recv_common); @@ -924,8 +964,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } /* Find the session context */ - session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); + session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true); if (!session || !session->recv_skb) { + if (session) { + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } + /* Not found? Pass to userspace to deal with */ l2tp_info(tunnel, L2TP_MSG_DATA, "%s: no session found (%u/%u). Passing up.\n", @@ -934,6 +980,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); + l2tp_session_dec_refcount(session); return 0; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index e7233ba..1d02050 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -240,6 +240,9 @@ out: return tunnel; } +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref); struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 2066953..3468d56 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -203,6 +204,12 @@ pass_up: return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a4b0c92..b10abef 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
*/ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); + return 0; pass_up: @@ -216,6 +218,12 @@ pass_up: return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); -- cgit v0.10.2 From 806e98835683694cbb9e74c28641df8042792e27 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:26 +0200 Subject: l2tp: ensure session can't get removed during pppol2tp_session_ioctl() commit 57377d63547861919ee634b845c7caa38de4a452 upstream. Holding a reference on session is required before calling pppol2tp_session_ioctl(). The session could get freed while processing the ioctl otherwise. Since pppol2tp_session_ioctl() uses the session's socket, we also need to take a reference on it in l2tp_session_get(). Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1387f54..c1c9a9e 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1141,11 +1141,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, if (stats.session_id != 0) { /* resend to session ioctl handler */ struct l2tp_session *session = - l2tp_session_find(sock_net(sk), tunnel, stats.session_id); - if (session != NULL) - err = pppol2tp_session_ioctl(session, cmd, arg); - else + l2tp_session_get(sock_net(sk), tunnel, + stats.session_id, true); + + if (session) { + err = pppol2tp_session_ioctl(session, cmd, + arg); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } else { err = -EBADR; + } break; } #ifdef CONFIG_XFRM -- cgit v0.10.2 From d9face6fc62a73059f0fc3a3de4dfe8f53536aa7 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:27 +0200 Subject: l2tp: fix duplicate session creation commit dbdbc73b44782e22b3b4b6e8b51e7a3d245f3086 upstream. l2tp_session_create() relies on its caller for checking for duplicate sessions. This is racy since a session can be concurrently inserted after the caller's verification. Fix this by letting l2tp_session_create() verify sessions uniqueness upon insertion. Callers need to be adapted to check for l2tp_session_create()'s return code instead of calling l2tp_session_find(). pppol2tp_connect() is a bit special because it has to work on existing sessions (if they're not connected) or to create a new session if none is found. When acting on a preexisting session, a reference must be held or it could go away on us. So we have to use l2tp_session_get() instead of l2tp_session_find() and drop the reference before exiting. 
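The common thread in these l2tp fixes is that "look up, then insert" is racy unless the duplicate check happens inside the insert, under the same lock. A toy pthread sketch of an atomic insert-unique operation (not the l2tp hash implementation; the list and names are invented):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	unsigned int id;
	struct session *next;
};

static struct session *sessions;
static pthread_mutex_t sessions_lock = PTHREAD_MUTEX_INITIALIZER;

static int session_add_unique(struct session *new)
{
	pthread_mutex_lock(&sessions_lock);
	for (struct session *s = sessions; s; s = s->next) {
		if (s->id == new->id) {
			pthread_mutex_unlock(&sessions_lock);
			return -EEXIST;	/* caller frees 'new' and bails out */
		}
	}
	new->next = sessions;		/* check and insert under one lock */
	sessions = new;
	pthread_mutex_unlock(&sessions_lock);
	return 0;
}

int main(void)
{
	struct session *a = calloc(1, sizeof(*a));
	struct session *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->id = b->id = 7;
	printf("first insert:  %d\n", session_add_unique(a));	/* 0 */
	printf("second insert: %d\n", session_add_unique(b));	/* -EEXIST */
	free(b);
	return 0;
}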
Fixes: d9e31d17ceba ("l2tp: Add L2TP ethernet pseudowire support") Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 046a5ba..f29911a 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -378,6 +378,48 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) } EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, + struct l2tp_session *session) +{ + struct l2tp_session *session_walk; + struct hlist_head *g_head; + struct hlist_head *head; + struct l2tp_net *pn; + + head = l2tp_session_id_hash(tunnel, session->session_id); + + write_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session_walk, head, hlist) + if (session_walk->session_id == session->session_id) + goto exist; + + if (tunnel->version == L2TP_HDR_VER_3) { + pn = l2tp_pernet(tunnel->l2tp_net); + g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), + session->session_id); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_for_each_entry(session_walk, g_head, global_hlist) + if (session_walk->session_id == session->session_id) + goto exist_glob; + + hlist_add_head_rcu(&session->global_hlist, g_head); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + } + + hlist_add_head(&session->hlist, head); + write_unlock_bh(&tunnel->hlist_lock); + + return 0; + +exist_glob: + spin_unlock_bh(&pn->l2tp_session_hlist_lock); +exist: + write_unlock_bh(&tunnel->hlist_lock); + + return -EEXIST; +} + /* Lookup a tunnel by id */ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) @@ -1787,6 +1829,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; + int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1842,6 +1885,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); + err = l2tp_session_add_to_tunnel(tunnel, session); + if (err) { + kfree(session); + + return ERR_PTR(err); + } + /* Bump the reference count. The session context is deleted * only when this drops to zero. 
*/ @@ -1851,28 +1901,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn /* Ensure tunnel socket isn't deleted */ sock_hold(tunnel->sock); - /* Add session to the tunnel's hash list */ - write_lock_bh(&tunnel->hlist_lock); - hlist_add_head(&session->hlist, - l2tp_session_id_hash(tunnel, session_id)); - write_unlock_bh(&tunnel->hlist_lock); - - /* And to the global session list if L2TPv3 */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_add_head_rcu(&session->global_hlist, - l2tp_session_id_hash_2(pn, session_id)); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - } - /* Ignore management session in session count value */ if (session->session_id != 0) atomic_inc(&l2tp_session_count); + + return session; } - return session; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(l2tp_session_create); diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 965f7e3..eecc64e 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -223,12 +223,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p goto out; } - session = l2tp_session_find(net, tunnel, session_id); - if (session) { - rc = -EEXIST; - goto out; - } - if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { @@ -242,8 +236,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); - if (!session) { - rc = -ENOMEM; + if (IS_ERR(session)) { + rc = PTR_ERR(session); goto out; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index c1c9a9e..1696f1f 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -583,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int error = 0; u32 tunnel_id, peer_tunnel_id; u32 session_id, peer_session_id; + bool drop_refcnt = false; int ver = 2; int fd; @@ -684,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = peer_tunnel_id; - /* Create session if it doesn't already exist. We handle the - * case where a session was previously created by the netlink - * interface by checking that the session doesn't already have - * a socket and its tunnel socket are what we expect. If any - * of those checks fail, return EEXIST to the caller. - */ - session = l2tp_session_find(sock_net(sk), tunnel, session_id); - if (session == NULL) { - /* Default MTU must allow space for UDP/L2TP/PPP - * headers. + session = l2tp_session_get(sock_net(sk), tunnel, session_id, false); + if (session) { + drop_refcnt = true; + ps = l2tp_session_priv(session); + + /* Using a pre-existing session is fine as long as it hasn't + * been connected yet. */ - cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; + if (ps->sock) { + error = -EEXIST; + goto end; + } - /* Allocate and initialize a new session context. 
*/ - session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); - if (session == NULL) { - error = -ENOMEM; + /* consistency checks */ + if (ps->tunnel_sock != tunnel->sock) { + error = -EEXIST; goto end; } } else { - ps = l2tp_session_priv(session); - error = -EEXIST; - if (ps->sock != NULL) - goto end; + /* Default MTU must allow space for UDP/L2TP/PPP headers */ + cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; + cfg.mru = cfg.mtu; - /* consistency checks */ - if (ps->tunnel_sock != tunnel->sock) + session = l2tp_session_create(sizeof(struct pppol2tp_session), + tunnel, session_id, + peer_session_id, &cfg); + if (IS_ERR(session)) { + error = PTR_ERR(session); goto end; + } } /* Associate session with its PPPoL2TP socket */ @@ -778,6 +779,8 @@ out_no_ppp: session->name); end: + if (drop_refcnt) + l2tp_session_dec_refcount(session); release_sock(sk); return error; @@ -805,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i if (tunnel->sock == NULL) goto out; - /* Check that this session doesn't already exist */ - error = -EEXIST; - session = l2tp_session_find(net, tunnel, session_id); - if (session != NULL) - goto out; - /* Default MTU values. */ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; @@ -818,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. */ - error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); - if (session == NULL) + if (IS_ERR(session)) { + error = PTR_ERR(session); goto out; + } ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; -- cgit v0.10.2 From 599e6f038777c6733eef244d4aac192edb612aa6 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:29 +0200 Subject: l2tp: hold session while sending creation notifications commit 5e6a9e5a3554a5b3db09cdc22253af1849c65dff upstream. l2tp_session_find() doesn't take any reference on the returned session. Therefore, the session may disappear while sending the notification. Use l2tp_session_get() instead and decrement session's refcount once the notification is sent. Fixes: 33f72e6f0c67 ("l2tp : multicast notification to the registered listeners") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 9f66272..8c0f483 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -634,10 +634,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf session_id, peer_session_id, &cfg); if (ret >= 0) { - session = l2tp_session_find(net, tunnel, session_id); - if (session) + session = l2tp_session_get(net, tunnel, session_id, false); + if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); + l2tp_session_dec_refcount(session); + } } out: -- cgit v0.10.2 From 08cb8e5f83fd2d4f6327173cc01322bc842806f1 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:30 +0200 Subject: l2tp: take a reference on sessions used in genetlink handlers commit 2777e2ab5a9cf2b4524486c6db1517a6ded25261 upstream. Callers of l2tp_nl_session_find() need to hold a reference on the returned session since there's no guarantee that it isn't going to disappear from under them. 
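The pattern applied throughout the series (look up and take a reference in one step, use the object, then drop the reference) reduces to the following compilable model. It uses C11 atomics and illustrative types only; the real code additionally invokes the optional session->ref()/deref() hooks when do_ref is set, and takes the reference while still holding the lock or RCU read side that keeps the entry alive.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
    unsigned int id;
    atomic_int refcount;    /* object is freed when this drops to zero */
};

static void session_put(struct session *s)
{
    if (atomic_fetch_sub(&s->refcount, 1) == 1)
        free(s);            /* last reference gone */
}

/* A lookup must hand back a reference, otherwise the object can be freed
 * between "found it" and "used it".
 */
static struct session *session_get(struct session *table[], size_t n,
                                   unsigned int id)
{
    for (size_t i = 0; i < n; i++) {
        if (table[i] && table[i]->id == id) {
            atomic_fetch_add(&table[i]->refcount, 1);
            return table[i];
        }
    }
    return NULL;
}

int main(void)
{
    struct session *s = calloc(1, sizeof(*s));
    struct session *table[1] = { s };

    s->id = 7;
    atomic_store(&s->refcount, 1);          /* the table's own reference */

    struct session *found = session_get(table, 1, 7);
    if (found) {
        printf("using session %u\n", found->id);
        session_put(found);                 /* drop the lookup reference */
    }
    table[0] = NULL;
    session_put(s);                         /* drop the table reference */
    return 0;
}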
Relying on the fact that no l2tp netlink message may be processed concurrently isn't enough: sessions can be deleted by other means (e.g. by closing the PPPOL2TP socket of a ppp pseudowire). l2tp_nl_cmd_session_delete() is a bit special: it runs a callback function that may require a previous call to session->ref(). In particular, for ppp pseudowires, the callback is l2tp_session_delete(), which then calls pppol2tp_session_close() and dereferences the PPPOL2TP socket. The socket might already be gone at the moment l2tp_session_delete() calls session->ref(), so we need to take a reference during the session lookup. So we need to pass the do_ref variable down to l2tp_session_get() and l2tp_session_get_by_ifname(). Since all callers have to be updated, l2tp_session_find_by_ifname() and l2tp_nl_session_find() are renamed to reflect their new behaviour. Fixes: 309795f4bec2 ("l2tp: Add netlink control API for L2TP") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f29911a..3bce651 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. */ -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref) { struct l2tp_net *pn = l2tp_pernet(net); int hash; @@ -366,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); rcu_read_unlock_bh(); + return session; } } @@ -376,7 +381,7 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, struct l2tp_session *session) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1d02050..0095012 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -248,7 +248,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, bool do_ref); -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref); struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 8c0f483..1ccd310 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + bool do_ref) { u32 tunnel_id; u32 session_id; @@ -66,14 +67,15 @@ static struct l2tp_session 
*l2tp_nl_session_find(struct genl_info *info) if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - session = l2tp_session_find_by_ifname(net, ifname); + session = l2tp_session_get_by_ifname(net, ifname, do_ref); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel) - session = l2tp_session_find(net, tunnel, session_id); + session = l2tp_session_get(net, tunnel, session_id, + do_ref); } return session; @@ -652,7 +654,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; u16 pw_type; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, true); if (session == NULL) { ret = -ENODEV; goto out; @@ -666,6 +668,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -675,7 +681,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf int ret = 0; struct l2tp_session *session; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; goto out; @@ -710,6 +716,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -805,29 +813,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; int ret; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; - goto out; + goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err_ref; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session, L2TP_CMD_SESSION_GET); if (ret < 0) - goto err_out; + goto err_ref_msg; - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); -err_out: - nlmsg_free(msg); + l2tp_session_dec_refcount(session); -out: + return ret; + +err_ref_msg: + nlmsg_free(msg); +err_ref: + l2tp_session_dec_refcount(session); +err: return ret; } -- cgit v0.10.2 From 2aa6d036b716c9242222e054d4ef34905ad45fd3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 16 Jun 2017 14:02:34 -0700 Subject: mm: numa: avoid waiting on freed migrated pages commit 3c226c637b69104f6b9f1c6ec5b08d7b741b3229 upstream. In do_huge_pmd_numa_page(), we attempt to handle a migrating thp pmd by waiting until the pmd is unlocked before we return and retry. However, we can race with migrate_misplaced_transhuge_page(): // do_huge_pmd_numa_page // migrate_misplaced_transhuge_page() // Holds 0 refs on page // Holds 2 refs on page vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); /* ... */ if (pmd_trans_migrating(*vmf->pmd)) { page = pmd_page(*vmf->pmd); spin_unlock(vmf->ptl); ptl = pmd_lock(mm, pmd); if (page_count(page) != 2)) { /* roll back */ } /* ... */ mlock_migrate_page(new_page, page); /* ... 
*/ spin_unlock(ptl); put_page(page); put_page(page); // page freed here wait_on_page_locked(page); goto out; } This can result in the freed page having its waiters flag set unexpectedly, which trips the PAGE_FLAGS_CHECK_AT_PREP checks in the page alloc/free functions. This has been observed on arm64 KVM guests. We can avoid this by having do_huge_pmd_numa_page() take a reference on the page before dropping the pmd lock, mirroring what we do in __migration_entry_wait(). When we hit the race, migrate_misplaced_transhuge_page() will see the reference and abort the migration, as it may do today in other cases. Fixes: b8916634b77bffb2 ("mm: Prevent parallel splits during THP migration") Link: http://lkml.kernel.org/r/1497349722-6731-2-git-send-email-will.deacon@arm.com Signed-off-by: Mark Rutland Signed-off-by: Will Deacon Acked-by: Steve Capper Acked-by: Kirill A. Shutemov Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d5b2b75..e7d5db9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1227,8 +1227,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) */ if (unlikely(pmd_trans_migrating(*fe->pmd))) { page = pmd_page(*fe->pmd); + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(fe->ptl); wait_on_page_locked(page); + put_page(page); goto out; } @@ -1260,8 +1263,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(fe->ptl); wait_on_page_locked(page); + put_page(page); page_nid = -1; goto out; } -- cgit v0.10.2 From 41172b772da4b9d875ed3fb90fe0e1a86742dc2a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 23 May 2017 21:54:10 -0400 Subject: sparc64: Handle PIO & MEM non-resumable errors. [ Upstream commit 047487241ff59374fded8c477f21453681f5995c ] User processes trying to access an invalid memory address via PIO will receive a SIGBUS signal instead of causing a panic. Memory errors will receive a SIGKILL since a SIGBUS may result in a coredump which may attempt to repeat the faulting access. Signed-off-by: Liam R. Howlett Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 496fa92..d44fb80 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs) atomic_inc(&sun4v_resum_oflow_cnt); } +/* Given a set of registers, get the virtual addressi that was being accessed + * by the faulting instructions at tpc. + */ +static unsigned long sun4v_get_vaddr(struct pt_regs *regs) +{ + unsigned int insn; + + if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) { + return compute_effective_address(regs, insn, + (insn >> 25) & 0x1f); + } + return 0; +} + +/* Attempt to handle non-resumable errors generated from userspace. + * Returns true if the signal was handled, false otherwise. 
+ */ +bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, + struct sun4v_error_entry *ent) { + + unsigned int attrs = ent->err_attrs; + + if (attrs & SUN4V_ERR_ATTRS_MEMORY) { + unsigned long addr = ent->err_raddr; + siginfo_t info; + + if (addr == ~(u64)0) { + /* This seems highly unlikely to ever occur */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n"); + } else { + unsigned long page_cnt = DIV_ROUND_UP(ent->err_size, + PAGE_SIZE); + + /* Break the unfortunate news. */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n", + addr); + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n", + page_cnt); + + while (page_cnt-- > 0) { + if (pfn_valid(addr >> PAGE_SHIFT)) + get_page(pfn_to_page(addr >> PAGE_SHIFT)); + addr += PAGE_SIZE; + } + } + info.si_signo = SIGKILL; + info.si_errno = 0; + info.si_trapno = 0; + force_sig_info(info.si_signo, &info, current); + + return true; + } + if (attrs & SUN4V_ERR_ATTRS_PIO) { + siginfo_t info; + + info.si_signo = SIGBUS; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *)sun4v_get_vaddr(regs); + force_sig_info(info.si_signo, &info, current); + + return true; + } + + /* Default to doing nothing */ + return false; +} + /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event, clear the first word of the entry, and die. */ @@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) put_cpu(); + if (!(regs->tstate & TSTATE_PRIV) && + sun4v_nonresum_error_user_handled(regs, &local_copy)) { + /* DON'T PANIC: This userspace error was handled. */ + return; + } + #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == cpu) { -- cgit v0.10.2 From 8886196a73204d167b7f8797eb6ebf61e76794d6 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 23 May 2017 21:54:11 -0400 Subject: sparc64: Zero pages on allocation for mondo and error queues. [ Upstream commit 7a7dc961a28b965a0d0303c2e989df17b411708b ] Error queues use a non-zero first word to detect if the queues are full. Using pages that have not been zeroed may result in false positive overflow events. These queues are set up once during boot so zeroing all mondo and error queue pages is safe. Note that the false positive overflow does not always occur because the page allocation for these queues is so early in the boot cycle that higher number CPUs get fresh pages. It is only when traps are serviced with lower number CPUs who were given already used pages that this issue is exposed. Signed-off-by: Liam R. Howlett Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e1b1ce6..5cbf03c 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) unsigned long order = get_order(size); unsigned long p; - p = __get_free_pages(GFP_KERNEL, order); + p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!p) { prom_printf("SUN4V: Error, cannot allocate queue.\n"); prom_halt(); -- cgit v0.10.2 From 0e8eca987e27077fc2ade85aa402dbc177fdb026 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Sat, 28 Jan 2017 22:53:28 +0300 Subject: net: ethtool: add support for 2500BaseT and 5000BaseT link modes [ Upstream commit 94842b4fc4d6b1691cfc86c6f5251f299d27f4ba ] This patch introduce support for 2500BaseT and 5000BaseT link modes. These modes are included in the new IEEE 802.3bz standard. Signed-off-by: Pavel Belous Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 8e54723..5c22e8c 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1368,6 +1368,8 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit @@ -1377,7 +1379,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ -- cgit v0.10.2 From 97ace183074d306942b903a148aebd5d061758f0 Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 28 Nov 2016 10:46:46 +0100 Subject: net: phy: add an option to disable EEE advertisement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d853d145ea3e63387a2ac759aa41d5e43876e561 ] This patch adds an option to disable EEE advertisement in the generic PHY by providing a mask of prohibited modes corresponding to the value found in the MDIO_AN_EEE_ADV register. On some platforms, PHY Low power idle seems to be causing issues, even breaking the link some cases. The patch provides a convenient way for these platforms to disable EEE advertisement and work around the issue. Signed-off-by: Jerome Brunet Tested-by: Yegor Yefremov Tested-by: Andreas Färber Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a9be26f..edd30eb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1384,6 +1384,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + /* Mask prohibited EEE modes */ + val &= ~phydev->eee_broken_modes; + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val); return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 14d57d0..b14fcf6 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1146,6 +1146,43 @@ static int genphy_config_advert(struct phy_device *phydev) } /** + * genphy_config_eee_advert - disable unwanted eee mode advertisement + * @phydev: target phy_device struct + * + * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy + * efficent ethernet modes. Returns 0 if the PHY's advertisement hasn't + * changed, and 1 if it has changed. + */ +static int genphy_config_eee_advert(struct phy_device *phydev) +{ + u32 broken = phydev->eee_broken_modes; + u32 old_adv, adv; + + /* Nothing to disable */ + if (!broken) + return 0; + + /* If the following call fails, we assume that EEE is not + * supported by the phy. If we read 0, EEE is not advertised + * In both case, we don't need to continue + */ + adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); + if (adv <= 0) + return 0; + + old_adv = adv; + adv &= ~broken; + + /* Advertising remains unchanged with the broken mask */ + if (old_adv == adv) + return 0; + + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv); + + return 1; +} + +/** * genphy_setup_forced - configures/forces speed/duplex from @phydev * @phydev: target phy_device struct * @@ -1203,15 +1240,20 @@ EXPORT_SYMBOL(genphy_restart_aneg); */ int genphy_config_aneg(struct phy_device *phydev) { - int result; + int err, changed; + + changed = genphy_config_eee_advert(phydev); if (AUTONEG_ENABLE != phydev->autoneg) return genphy_setup_forced(phydev); - result = genphy_config_advert(phydev); - if (result < 0) /* error */ - return result; - if (result == 0) { + err = genphy_config_advert(phydev); + if (err < 0) /* error */ + return err; + + changed |= err; + + if (changed == 0) { /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ @@ -1221,16 +1263,16 @@ int genphy_config_aneg(struct phy_device *phydev) return ctl; if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) - result = 1; /* do restart aneg */ + changed = 1; /* do restart aneg */ } /* Only restart aneg if we are advertising something different * than we were before. 
*/ - if (result > 0) - result = genphy_restart_aneg(phydev); + if (changed > 0) + return genphy_restart_aneg(phydev); - return result; + return 0; } EXPORT_SYMBOL(genphy_config_aneg); @@ -1588,6 +1630,21 @@ static void of_set_phy_supported(struct phy_device *phydev) __set_phy_supported(phydev, max_speed); } +static void of_set_phy_eee_broken(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 broken; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (!of_property_read_u32(node, "eee-broken-modes", &broken)) + phydev->eee_broken_modes = broken; +} + /** * phy_probe - probe and init a PHY device * @dev: device to probe and init @@ -1625,6 +1682,11 @@ static int phy_probe(struct device *dev) of_set_phy_supported(phydev); phydev->advertising = phydev->supported; + /* Get the EEE modes we want to prohibit. We will ask + * the PHY stop advertising these mode later on + */ + of_set_phy_eee_broken(phydev); + /* Set the state to READY by default */ phydev->state = PHY_READY; diff --git a/include/linux/phy.h b/include/linux/phy.h index bd22670..6c9b1e0 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -401,6 +401,9 @@ struct phy_device { u32 advertising; u32 lp_advertising; + /* Energy efficient ethernet modes which should be prohibited */ + u32 eee_broken_modes; + int autoneg; int link_timeout; -- cgit v0.10.2 From 752ba680eb70ebc1e235b2ac1087ce471e2c800d Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 28 Nov 2016 10:46:47 +0100 Subject: dt-bindings: net: add EEE capability constants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1fc31357ad194fb98691f3d122bcd47e59239e83 ] Signed-off-by: Jerome Brunet Tested-by: Yegor Yefremov Tested-by: Andreas Färber Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h new file mode 100644 index 0000000..99c6d90 --- /dev/null +++ b/include/dt-bindings/net/mdio.h @@ -0,0 +1,19 @@ +/* + * This header provides generic constants for ethernet MDIO bindings + */ + +#ifndef _DT_BINDINGS_NET_MDIO_H +#define _DT_BINDINGS_NET_MDIO_H + +/* + * EEE capability Advertisement + */ + +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ + +#endif -- cgit v0.10.2 From 40373d91a0f764c8ba5c56ea3dc88896faa4510d Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:36 +0100 Subject: net: phy: fix sign type error in genphy_config_eee_advert [ Upstream commit 3bb9ab63276696988d8224f52db20e87194deb4b ] In genphy_config_eee_advert, the return value of phy_read_mmd_indirect is checked to know if the register could be accessed but the result is assigned to a 'u32'. Changing to 'int' to correctly get errors from phy_read_mmd_indirect. Fixes: d853d145ea3e ("net: phy: add an option to disable EEE advertisement") Reported-by: Julia Lawall Signed-off-by: Jerome Brunet Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b14fcf6..d9ec748 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1155,8 +1155,8 @@ static int genphy_config_advert(struct phy_device *phydev) */ static int genphy_config_eee_advert(struct phy_device *phydev) { - u32 broken = phydev->eee_broken_modes; - u32 old_adv, adv; + int broken = phydev->eee_broken_modes; + int old_adv, adv; /* Nothing to disable */ if (!broken) -- cgit v0.10.2 From 3897ae12b706bfc47c07a1eef58fe6ce328784cf Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:37 +0100 Subject: net: phy: use boolean dt properties for eee broken modes [ Upstream commit 57f3986231bb2c69a55ccab1d2b30a00818027ac ] The patches regarding eee-broken-modes was merged before all people involved could find an agreement on the best way to move forward. While we agreed on having a DT property to mark particular modes as broken, the value used for eee-broken-modes mapped the phy register in very direct way. Because of this, the concern is that it could be used to implement configuration policies instead of describing a broken HW. In the end, having a boolean property for each mode seems to be preferred over one bit field value mapping the register (too) directly. Cc: Florian Fainelli Signed-off-by: Jerome Brunet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d9ec748..32b555a 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1633,7 +1633,7 @@ static void of_set_phy_supported(struct phy_device *phydev) static void of_set_phy_eee_broken(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; - u32 broken; + u32 broken = 0; if (!IS_ENABLED(CONFIG_OF_MDIO)) return; @@ -1641,8 +1641,20 @@ static void of_set_phy_eee_broken(struct phy_device *phydev) if (!node) return; - if (!of_property_read_u32(node, "eee-broken-modes", &broken)) - phydev->eee_broken_modes = broken; + if (of_property_read_bool(node, "eee-broken-100tx")) + broken |= MDIO_EEE_100TX; + if (of_property_read_bool(node, "eee-broken-1000t")) + broken |= MDIO_EEE_1000T; + if (of_property_read_bool(node, "eee-broken-10gt")) + broken |= MDIO_EEE_10GT; + if (of_property_read_bool(node, "eee-broken-1000kx")) + broken |= MDIO_EEE_1000KX; + if (of_property_read_bool(node, "eee-broken-10gkx4")) + broken |= MDIO_EEE_10GKX4; + if (of_property_read_bool(node, "eee-broken-10gkr")) + broken |= MDIO_EEE_10GKR; + + phydev->eee_broken_modes = broken; } /** -- cgit v0.10.2 From 8bface142a8d4bc5766bc71c94a618f234ed2bc6 Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:38 +0100 Subject: dt: bindings: net: use boolean dt properties for eee broken modes [ Upstream commit 308d3165d8b2b98d3dc3d97d6662062735daea67 ] The patches regarding eee-broken-modes was merged before all people involved could find an agreement on the best way to move forward. While we agreed on having a DT property to mark particular modes as broken, the value used for eee-broken-modes mapped the phy register in very direct way. Because of this, the concern is that it could be used to implement configuration policies instead of describing a broken HW. In the end, having a boolean property for each mode seems to be preferred over one bit field value mapping the register (too) directly. 
Cc: Florian Fainelli Signed-off-by: Jerome Brunet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index bc1c3c8..62bdc5f 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -35,6 +35,15 @@ Optional Properties: - broken-turn-around: If set, indicates the PHY device does not correctly release the turn around line low at the end of a MDIO transaction. +- eee-broken-100tx: +- eee-broken-1000t: +- eee-broken-10gt: +- eee-broken-1000kx: +- eee-broken-10gkx4: +- eee-broken-10gkr: + Mark the corresponding energy efficient ethernet mode as broken and + request the ethernet to stop advertising it. + Example: ethernet-phy@0 { diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h deleted file mode 100644 index 99c6d90..0000000 --- a/include/dt-bindings/net/mdio.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This header provides generic constants for ethernet MDIO bindings - */ - -#ifndef _DT_BINDINGS_NET_MDIO_H -#define _DT_BINDINGS_NET_MDIO_H - -/* - * EEE capability Advertisement - */ - -#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ -#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ -#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ -#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ -#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ -#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ - -#endif -- cgit v0.10.2 From 13fa36f9fbc84c47cef6673d5e2f3a20693d6eff Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 20 Jan 2017 08:20:24 -0800 Subject: ARM64: dts: meson-gxbb-odroidc2: fix GbE tx link breakage [ Upstream commit feb3cbea0946c67060e2d5bcb7499b0a6f6700fe ] OdroidC2 GbE link breaks under heavy tx transfer. This happens even if the MAC does not enable Energy Efficient Ethernet (No Low Power state Idle on the Tx path). The problem seems to come from the phy Rx path, entering the LPI state. Disabling EEE advertisement on the phy prevent this feature to be negociated with the link partner and solve the issue. Signed-off-by: Jerome Brunet Signed-off-by: Kevin Hilman Signed-off-by: Arnd Bergmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index e6e3491..f150a4c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -85,6 +85,18 @@ status = "okay"; pinctrl-0 = <ð_pins>; pinctrl-names = "default"; + phy-handle = <ð_phy0>; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + eth_phy0: ethernet-phy@0 { + reg = <0>; + eee-broken-1000t; + }; + }; }; &ir { -- cgit v0.10.2 From afaee3ef513650b2f6cb9e2c860b9210875a8135 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:48 +0200 Subject: xen/blkback: don't free be structure too early MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 71df1d7ccad1c36f7321d6b3b48f2ea42681c363 upstream. The be structure must not be freed when freeing the blkif structure isn't done. Otherwise a use-after-free of be when unmapping the ring used for communicating with the frontend will occur in case of a late call of xenblk_disconnect() (e.g. due to an I/O still active when trying to disconnect). 
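The underlying bug class is freeing a container while a longer-lived object still points back into it. A stripped-down illustration of the corrected ownership (plain C with hypothetical struct names standing in for be/blkif; this is not the Xen code):

#include <stdio.h>
#include <stdlib.h>

struct backend;

struct blkif {
    struct backend *be;     /* still dereferenced during a late disconnect */
    int ring_mapped;
};

struct backend {
    struct blkif *blkif;
    char *mode;
};

static void blkif_disconnect(struct blkif *blkif)
{
    if (blkif->ring_mapped) {
        /* the real teardown reaches back through blkif->be here,
         * so "be" must still be alive at this point
         */
        printf("unmapping ring for backend %p\n", (void *)blkif->be);
        blkif->ring_mapped = 0;
    }
}

static void blkif_free(struct blkif *blkif)
{
    blkif_disconnect(blkif);    /* must fully finish first ...           */
    free(blkif->be->mode);      /* ... only then tear the backend down   */
    free(blkif->be);
    free(blkif);
}

int main(void)
{
    struct backend *be = calloc(1, sizeof(*be));

    be->blkif = calloc(1, sizeof(*be->blkif));
    be->blkif->be = be;
    be->blkif->ring_mapped = 1;

    /* device removal: do NOT free be here, defer it to blkif_free() */
    blkif_free(be->blkif);
    return 0;
}

In the driver this corresponds to moving kfree(be->mode) and kfree(be) out of xen_blkbk_remove() and into xen_blkif_free(), i.e. to the point where the final reference on the blkif is dropped and the disconnect is known to be complete.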
Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index d8fc9c5..5dfe6e8 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -315,8 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { - xen_blkif_disconnect(blkif); + WARN_ON(xen_blkif_disconnect(blkif)); xen_vbd_free(&blkif->vbd); + kfree(blkif->be->mode); + kfree(blkif->be); /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); @@ -511,8 +513,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) /* Put the reference we set in xen_blkif_alloc(). */ xen_blkif_put(be->blkif); - kfree(be->mode); - kfree(be); return 0; } -- cgit v0.10.2 From 80b1a1180e4e72fed893e5aba73fe7ccea7aa30e Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 17 Jan 2017 14:51:04 +0100 Subject: KVM: x86: fix fixing of hypercalls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ce2e852ecc9a42e4b8dabb46025cfef63209234a ] emulator_fix_hypercall() replaces hypercall with vmcall instruction, but it does not handle GP exception properly when writes the new instruction. It can return X86EMUL_PROPAGATE_FAULT without setting exception information. This leads to incorrect emulation and triggers WARN_ON(ctxt->exception.vector > 0x1f) in x86_emulate_insn() as discovered by syzkaller fuzzer: WARNING: CPU: 2 PID: 18646 at arch/x86/kvm/emulate.c:5558 Call Trace: warn_slowpath_null+0x2c/0x40 kernel/panic.c:582 x86_emulate_insn+0x16a5/0x4090 arch/x86/kvm/emulate.c:5572 x86_emulate_instruction+0x403/0x1cc0 arch/x86/kvm/x86.c:5618 emulate_instruction arch/x86/include/asm/kvm_host.h:1127 [inline] handle_exception+0x594/0xfd0 arch/x86/kvm/vmx.c:5762 vmx_handle_exit+0x2b7/0x38b0 arch/x86/kvm/vmx.c:8625 vcpu_enter_guest arch/x86/kvm/x86.c:6888 [inline] vcpu_run arch/x86/kvm/x86.c:6947 [inline] Set exception information when write in emulator_fix_hypercall() fails. Signed-off-by: Dmitry Vyukov Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Wanpeng Li Cc: kvm@vger.kernel.org Cc: syzkaller@googlegroups.com Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 62cde4f..ab3f003 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6111,7 +6111,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, + &ctxt->exception); } static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) -- cgit v0.10.2 From ee4494c6bda8ac530f85756e619c1727d2539b6c Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Thu, 12 Jan 2017 15:25:10 +0900 Subject: scsi: sd: Fix wrong DPOFUA disable in sd_read_cache_type [ Upstream commit 26f2819772af891dee2843e1f8662c58e5129d5f ] Zoned block devices force the use of READ/WRITE(16) commands by setting sdkp->use_16_for_rw and clearing sdkp->use_10_for_rw. This result in DPOFUA always being disabled for these drives as the assumed use of the deprecated READ/WRITE(6) commands only looks at sdkp->use_10_for_rw. Strenghten the test by also checking that sdkp->use_16_for_rw is false. 
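The corrected condition is easiest to read as a decision table. The sketch below compiles as ordinary C and models only the three flags involved; it is not the real struct scsi_disk/scsi_device:

#include <stdbool.h>
#include <stdio.h>

struct dev_flags {
    bool dpofua;
    bool use_10_for_rw;
    bool use_16_for_rw;
};

/* READ/WRITE(6) carry no FUA bit, so DPOFUA is only unusable when the
 * device uses neither the 10-byte nor the 16-byte opcodes.
 */
static bool keep_dpofua(const struct dev_flags *d)
{
    if (!d->dpofua)
        return false;
    return d->use_10_for_rw || d->use_16_for_rw;
}

int main(void)
{
    struct dev_flags regular = { .dpofua = true, .use_10_for_rw = true };
    struct dev_flags zoned   = { .dpofua = true, .use_16_for_rw = true };
    struct dev_flags rw6     = { .dpofua = true };

    printf("regular: %d\n", keep_dpofua(&regular)); /* 1 */
    printf("zoned:   %d\n", keep_dpofua(&zoned));   /* 1, was 0 before the fix */
    printf("rw6:     %d\n", keep_dpofua(&rw6));     /* 0 */
    return 0;
}

Zoned devices set use_16_for_rw and clear use_10_for_rw, so the old test (DPOFUA set and use_10_for_rw clear) wrongly classified them as READ/WRITE(6) users and dropped DPOFUA; the extra use_16_for_rw check keeps it.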
Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 931af07..13ac7e5 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2572,7 +2572,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; -- cgit v0.10.2 From 7782ab228f64e7da4c47a90b40fbb80920ce722b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 17 Jan 2017 12:23:21 +0100 Subject: stmmac: add missing of_node_put [ Upstream commit a249708bc2aa1fe3ddf15dfac22bee519d15996b ] The function stmmac_dt_phy provides several possibilities for initializing plat->mdio_node, all of which have the effect of increasing the reference count of the assigned value. This field is not updated elsewhere, so the value is live until the end of the lifetime of plat (devm_allocated), just after the end of stmmac_remove_config_dt. Thus, add an of_node_put on plat->mdio_node in stmmac_remove_config_dt. It is possible that the field mdio_node is never initialized, but of_node_put is NULL-safe, so it is also safe to call of_node_put in that case. Signed-off-by: Julia Lawall Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ac3d39c..890e4b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -346,6 +346,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(plat->phy_node); + of_node_put(plat->mdio_node); } #else struct plat_stmmacenet_data * -- cgit v0.10.2 From 42a1d5b47594eb846f709f6558082919dabc7344 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Tue, 10 Jan 2017 12:05:54 +0100 Subject: scsi: lpfc: Set elsiocb contexts to NULL after freeing it [ Upstream commit 8667f515952feefebb3c0f8d9a9266c91b101a46 ] Set the elsiocb contexts to NULL after freeing as others depend on it. Signed-off-by: Johannes Thumshirn Acked-by: Dick Kennedy Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b7d54bf..7b696d1 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; -- cgit v0.10.2 From 8cfcaa2899f322fa602e903e983389fd1de36fe8 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 23 Dec 2016 18:06:11 -0800 Subject: qla2xxx: Terminate exchange if corrupted [ Upstream commit 5f35509db179ca7ed1feaa4b14f841adb06ed220 ] Corrupted ATIO is defined as length of fcp_header & fcp_cmd payload is less than 0x38. It's the minimum size for a frame to carry 8..16 bytes SCSI CDB. The exchange will be dropped or terminated if corrupted. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Christoph Hellwig [ bvanassche: Fixed spelling in patch title ] Signed-off-by: Bart Van Assche Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8e63a7b..91ec068 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1555,7 +1555,8 @@ typedef struct { struct atio { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. */ - uint8_t data[58]; + __le16 attr_n_length; + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ }; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index feab7ea..ee450b6 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6463,12 +6463,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) if (!vha->flags.online) return; - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, - ha_locked); + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xffff, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f6..0824a81 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -427,13 +427,33 @@ struct atio_from_isp { struct { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; } __packed; +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* -- cgit v0.10.2 From 0c9626619777f76a4a6761a259dcad263b1902d3 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 23 Dec 2016 18:06:13 -0800 Subject: qla2xxx: Fix erroneous invalid handle message [ Upstream commit 4f060736f29a960aba8e781a88837464756200a8 ] Termination of Immediate Notify IOCB was using wrong IOCB handle. IOCB completion code was unable to find appropriate code path due to wrong handle. Following message is seen in the logs. "Error entry - invalid handle/queue (ffff)." Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Christoph Hellwig [ bvanassche: Fixed word order in patch title ] Signed-off-by: Bart Van Assche Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 068c4e4..bddaabb 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2487,6 +2487,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ee450b6..91f5f55 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3067,7 +3067,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; -- cgit v0.10.2 From 9f2a36a7504c994f89a1fe8e4d94b8d43423816f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 20:03:59 +0800 Subject: drm/amdgpu: fix program vce instance logic error. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 50a1ebc70a2803deb7811fc73fb55d70e353bc34 ] need to clear bit31-29 in GRBM_GFX_INDEX, then the program can be valid. 
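For reference, the register values implied by the new macros can be reproduced with a trivial host-side program (constants copied from the hunk below; this only prints values and is not driver code):

#include <stdint.h>
#include <stdio.h>

#define VCE_INSTANCE_SHIFT      0x04
#define VCE_ALL_PIPE            0x07
#define GRBM_GFX_INDEX_DEFAULT  0xE0000000u  /* default restored afterwards; bits 31-29 set */

/* Full-register value used while programming one VCE instance: bits 31-29
 * clear, the instance field selected, the low pipe bits set. This mirrors
 * GET_VCE_INSTANCE() in the patch.
 */
static uint32_t vce_instance_index(unsigned int idx)
{
    return (idx << VCE_INSTANCE_SHIFT) | VCE_ALL_PIPE;
}

int main(void)
{
    for (unsigned int idx = 0; idx < 2; idx++)
        printf("instance %u -> GRBM_GFX_INDEX = 0x%08x\n",
               idx, vce_instance_index(idx));
    printf("restore    -> GRBM_GFX_INDEX = 0x%08x\n",
           (unsigned int)GRBM_GFX_INDEX_DEFAULT);
    return 0;
}

The previous WREG32_FIELD() update changed only the VCE_INSTANCE field and left bits 31-29 as they were; the patch instead writes the whole register with the per-instance value while programming and restores the 0xE0000000 default when done.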
Signed-off-by: Rex Zhu Acked-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6feed72..50f0cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,9 +43,13 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 + #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) @@ -54,6 +58,9 @@ #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ + | GRBM_GFX_INDEX__VCE_ALL_PIPE) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); vce_v3_0_mc_resume(adev, idx); WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); @@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); @@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) * VCE team suggest use bit 3--bit 6 for busy status check */ mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { @@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock 
ON/OFF delay */ @@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; -- cgit v0.10.2 From 10c24e89b2b86907fc9588db1fa7300e9a1a194a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jan 2017 15:06:58 -0500 Subject: drm/amdgpu: add support for new hainan variants [ Upstream commit 17324b6add82d6c0bf119f1d1944baef392a4e39 ] New hainan parts require updated smc firmware. Cc: Sonny Jiang Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 6f3c891..4cb347e 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; @@ -7721,10 +7722,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) ((adev->pdev->device == 0x6660) || (adev->pdev->device == 0x6663) || (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667))) || - ((adev->pdev->revision == 0xc3) && - (adev->pdev->device == 0x6665))) + (adev->pdev->device == 0x6667)))) chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) + chip_name = "banks_k_2"; else chip_name = "hainan"; break; -- cgit v0.10.2 From 3eeb3459b7e6ec77d0ca2ae1bc82ecefe16d4c50 Mon Sep 17 00:00:00 2001 From: "Alvaro G. M" Date: Tue, 17 Jan 2017 09:08:16 +0100 Subject: net: phy: dp83848: add DP83620 PHY support [ Upstream commit 93b43fd137cd8865adf9978ab9870a344365d3af ] This PHY with fiber support is register compatible with DP83848, so add support for it. Signed-off-by: Alvaro Gamez Machado Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 800b39f..a10d0e7 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -17,6 +17,7 @@ #include #define TI_DP83848C_PHY_ID 0x20005ca0 +#define TI_DP83620_PHY_ID 0x20005ce0 #define NS_DP83848C_PHY_ID 0x20005c90 #define TLK10X_PHY_ID 0x2000a210 #define TI_DP83822_PHY_ID 0x2000a240 @@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev) static struct mdio_device_id __maybe_unused dp83848_tbl[] = { { TI_DP83848C_PHY_ID, 0xfffffff0 }, { NS_DP83848C_PHY_ID, 0xfffffff0 }, + { TI_DP83620_PHY_ID, 0xfffffff0 }, { TLK10X_PHY_ID, 0xfffffff0 }, { TI_DP83822_PHY_ID, 0xfffffff0 }, { } @@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), }; -- cgit v0.10.2 From fded17be01abfefe7218a72df703d8fe6b28206f Mon Sep 17 00:00:00 2001 From: Zhou Chengming Date: Mon, 16 Jan 2017 11:21:11 +0800 Subject: perf/x86/intel: Handle exclusive threadid correctly on CPU hotplug [ Upstream commit 4e71de7986386d5fd3765458f27d612931f27f5e ] The CPU hotplug function intel_pmu_cpu_starting() sets cpu_hw_events.excl_thread_id unconditionally to 1 when the shared exclusive counters data structure is already availabe for the sibling thread. This works during the boot process because the first sibling gets threadid 0 assigned and the second sibling which shares the data structure gets 1. But when the first thread of the core is offlined and onlined again it shares the data structure with the second thread and gets exclusive thread id 1 assigned as well. Prevent this by checking the threadid of the already online thread. [ tglx: Rewrote changelog ] Signed-off-by: Zhou Chengming Cc: NuoHan Qiao Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: kan.liang@intel.com Cc: dave.hansen@linux.intel.com Cc: eranian@google.com Cc: qiaonuohan@huawei.com Cc: davidcc@google.com Cc: guohanjun@huawei.com Link: http://lkml.kernel.org/r/1484536871-3131-1-git-send-email-zhouchengming1@huawei.com Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 3bdb917..24a6cd2 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3164,13 +3164,16 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { for_each_cpu(i, topology_sibling_cpumask(cpu)) { + struct cpu_hw_events *sibling; struct intel_excl_cntrs *c; - c = per_cpu(cpu_hw_events, i).excl_cntrs; + sibling = &per_cpu(cpu_hw_events, i); + c = sibling->excl_cntrs; if (c && c->core_id == core_id) { cpuc->kfree_on_online[1] = cpuc->excl_cntrs; cpuc->excl_cntrs = c; - cpuc->excl_thread_id = 1; + if (!sibling->excl_thread_id) + cpuc->excl_thread_id = 1; break; } } -- cgit v0.10.2 From ea7b808165a5161fcd148c8b41fa03e79e65cb82 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 23 Dec 2016 19:56:56 -0800 Subject: net: korina: Fix NAPI versus resources freeing commit e6afb1ad88feddf2347ea779cfaf4d03d3cd40b6 upstream. 
Commit beb0babfb77e ("korina: disable napi on close and restart") introduced calls to napi_disable() that were missing before, unfortunately this leaves a small window during which NAPI has a chance to run, yet we just freed resources since korina_free_ring() has been called: Fix this by disabling NAPI first then freeing resource, and make sure that we also cancel the restart task before doing the resource freeing. Fixes: beb0babfb77e ("korina: disable napi on close and restart") Reported-by: Alexandros C. Couloumbis Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 1799fe1..c051987 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); + korina_free_ring(dev); + if (korina_init(dev) < 0) { printk(KERN_ERR "%s: cannot restart device\n", dev->name); return; @@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; writel(tmp, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); cancel_work_sync(&lp->restart_task); + korina_free_ring(dev); + free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); free_irq(lp->ovr_irq, dev); -- cgit v0.10.2 From 6e315b2b10b65022ce07e6ed3e2decf7678d58c2 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Fri, 6 Jan 2017 10:39:49 +1100 Subject: powerpc/eeh: Enable IO path on permanent error [ Upstream commit 387bbc974f6adf91aa635090f73434ed10edd915 ] We give up recovery on permanent error, simply shutdown the affected devices and remove them. If the devices can't be put into quiet state, they spew more traffic that is likely to cause another unexpected EEH error. This was observed on "p8dtu2u" machine: 0002:00:00.0 PCI bridge: IBM Device 03dc 0002:01:00.0 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.1 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.2 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.3 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) On P8 PowerNV platform, the IO path is frozen when shutdowning the devices, meaning the memory registers are inaccessible. It is why the devices can't be put into quiet state before removing them. This fixes the issue by enabling IO path prior to putting the devices into quiet state. Reported-by: Pridhiviraj Paidipeddi Signed-off-by: Gavin Shan Acked-by: Russell Currey Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index f257316..e5bfbf6 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) * * For pHyp, we have to enable IO for log retrieval. Otherwise, * 0xFF's is always returned from PCI config space. + * + * When the @severity is EEH_LOG_PERM, the PE is going to be + * removed. Prior to that, the drivers for devices included in + * the PE will be closed. The drivers rely on working IO path + * to bring the devices to quiet state. 
Otherwise, PCI traffic + * from those devices after they are removed is like to cause + * another unexpected EEH error. */ if (!(pe->type & EEH_PE_PHB)) { - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || + severity == EEH_LOG_PERM) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); /* -- cgit v0.10.2 From 5dcd085942761174f6ff1271fe707e4e2308d64c Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 17 Jan 2017 19:19:17 +0200 Subject: net: ethtool: Initialize buffer when querying device channel settings [ Upstream commit 31a86d137219373c3222ca5f4f912e9a4d8065bb ] Ethtool channels respond struct was uninitialized when querying device channel boundaries settings. As a result, unreported fields by the driver hold garbage. This may cause sending unsupported params to driver. Fixes: 8bf368620486 ('ethtool: ensure channel counts are within bounds ...') Signed-off-by: Eran Ben Elisha Signed-off-by: Tariq Toukan CC: John W. Linville Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 072c1f4..e9989b8 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1704,7 +1704,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev, static noinline_for_stack int ethtool_set_channels(struct net_device *dev, void __user *useraddr) { - struct ethtool_channels channels, max; + struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS }; u32 max_rx_in_use = 0; if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) -- cgit v0.10.2 From 7bdccaa5da12f294636de312c73d7d33dfaa947c Mon Sep 17 00:00:00 2001 From: Igor Druzhinin Date: Tue, 17 Jan 2017 20:49:37 +0000 Subject: xen-netback: fix memory leaks on XenBus disconnect [ Upstream commit 9a6cdf52b85ea5fb21d2bb31e4a7bc61b79923a7 ] Eliminate memory leaks introduced several years ago by cleaning the queue resources which are allocated on XenBus connection event. Namely, queue structure array and pages used for IO rings. Signed-off-by: Igor Druzhinin Reviewed-by: Paul Durrant Acked-by: Wei Liu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 8674e18..abffdbd 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -493,11 +493,20 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { if (be->vif) { + unsigned int queue_index; + xen_unregister_watchers(be->vif); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(be->vif); #endif /* CONFIG_DEBUG_FS */ xenvif_disconnect_data(be->vif); + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); + + vfree(be->vif->queues); + be->vif->num_queues = 0; + be->vif->queues = NULL; + xenvif_disconnect_ctrl(be->vif); } } @@ -1040,6 +1049,8 @@ static void connect(struct backend_info *be) err: if (be->vif->num_queues > 0) xenvif_disconnect_data(be->vif); /* Clean up existing queues */ + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); vfree(be->vif->queues); be->vif->queues = NULL; be->vif->num_queues = 0; -- cgit v0.10.2 From da805bc788b0dfce728b22d2595e569d2ee9769e Mon Sep 17 00:00:00 2001 From: Igor Druzhinin Date: Tue, 17 Jan 2017 20:49:38 +0000 Subject: xen-netback: protect resource cleaning on XenBus disconnect [ Upstream commit f16f1df65f1cf139ff9e9f84661e6573d6bb27fc ] vif->lock is used to protect statistics gathering agents from using the queue structure during cleaning. Signed-off-by: Igor Druzhinin Acked-by: Wei Liu Reviewed-by: Paul Durrant Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 74dc2bf..b009d79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned int num_queues = vif->num_queues; unsigned long rx_bytes = 0; unsigned long rx_packets = 0; unsigned long tx_bytes = 0; unsigned long tx_packets = 0; unsigned int index; + spin_lock(&vif->lock); if (vif->queues == NULL) goto out; /* Aggregate tx and rx stats from each queue */ - for (index = 0; index < num_queues; ++index) { + for (index = 0; index < vif->num_queues; ++index) { queue = &vif->queues[index]; rx_bytes += queue->stats.rx_bytes; rx_packets += queue->stats.rx_packets; @@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) } out: + spin_unlock(&vif->lock); + vif->dev->stats.rx_bytes = rx_bytes; vif->dev->stats.rx_packets = rx_packets; vif->dev->stats.tx_bytes = tx_bytes; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index abffdbd..b44f37f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -503,9 +503,11 @@ static void backend_disconnect(struct backend_info *be) for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) xenvif_deinit_queue(&be->vif->queues[queue_index]); + spin_lock(&be->vif->lock); vfree(be->vif->queues); be->vif->num_queues = 0; be->vif->queues = NULL; + spin_unlock(&be->vif->lock); xenvif_disconnect_ctrl(be->vif); } -- cgit v0.10.2 From a7a2a6d34fe78261945a5eb5eeca6c4fa3ad800e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 17 Jan 2017 
22:07:19 -0500 Subject: bnxt_en: Fix "uninitialized variable" bug in TPA code path. [ Upstream commit 719ca8111402aa6157bd83a3c966d184db0d8956 ] In the TPA GRO code path, initialize the tcp_opt_len variable to 0 so that it will be correct for packets without TCP timestamps. The bug caused the SKB fields to be incorrectly set up for packets without TCP timestamps, leading to these packets being rejected by the stack. Reported-by: Andy Gospodarek Acked-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5cc0f8c..20e569b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1097,7 +1097,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, { #ifdef CONFIG_INET struct tcphdr *th; - int len, nw_off, tcp_opt_len; + int len, nw_off, tcp_opt_len = 0; if (tcp_ts) tcp_opt_len = 12; -- cgit v0.10.2 From 251d00bf1309c65316f5bd3850b2ca523b46921c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 18 Jan 2017 15:14:17 +0100 Subject: bpf: don't trigger OOM killer under pressure with map alloc [ Upstream commit d407bd25a204bd66b7346dde24bd3d37ef0e0b05 ] This patch adds two helpers, bpf_map_area_alloc() and bpf_map_area_free(), that are to be used for map allocations. Using kmalloc() for very large allocations can cause excessive work within the page allocator, so i) fall back earlier to vmalloc() when the attempt is considered costly anyway, and even more importantly ii) don't trigger OOM killer with any of the allocators. Since this is based on a user space request, for example, when creating maps with element pre-allocation, we really want such requests to fail instead of killing other user space processes. Also, don't spam the kernel log with warnings should any of the allocations fail under pressure. Given that, we can make backend selection in bpf_map_area_alloc() generic, and convert all maps over to use this API for spots with potentially large allocation requests. Note, replacing the one kmalloc_array() is fine as overflow checks happen earlier in htab_map_alloc(), since it must also protect the multiplication for vmalloc() should kmalloc_array() fail. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c201017..97498be 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -243,6 +243,8 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); +void *bpf_map_area_alloc(size_t size); +void bpf_map_area_free(void *base); extern int sysctl_unprivileged_bpf_disabled; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index a2ac051..f3721e1 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -11,7 +11,6 @@ */ #include #include -#include #include #include #include @@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) if (array_size >= U32_MAX - PAGE_SIZE) return ERR_PTR(-ENOMEM); - /* allocate all map elements and zero-initialize them */ - array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); - if (!array) { - array = vzalloc(array_size); - if (!array) - return ERR_PTR(-ENOMEM); - } + array = bpf_map_area_alloc(array_size); + if (!array) + return ERR_PTR(-ENOMEM); /* copy mandatory map attributes */ array->map.map_type = attr->map_type; @@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) if (array_size >= U32_MAX - PAGE_SIZE || elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { - kvfree(array); + bpf_map_area_free(array); return ERR_PTR(-ENOMEM); } out: @@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map) if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); - kvfree(array); + bpf_map_area_free(array); } static const struct bpf_map_ops array_ops = { @@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map) /* make sure it's empty */ for (i = 0; i < array->map.max_entries; i++) BUG_ON(array->ptrs[i] != NULL); - kvfree(array); + + bpf_map_area_free(array); } static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index ad1bc67..ad2f0ed 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "percpu_freelist.h" struct bucket { @@ -84,14 +83,15 @@ static void htab_free_elems(struct bpf_htab *htab) free_percpu(pptr); } free_elems: - vfree(htab->elems); + bpf_map_area_free(htab->elems); } static int prealloc_elems_and_freelist(struct bpf_htab *htab) { int err = -ENOMEM, i; - htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); + htab->elems = bpf_map_area_alloc(htab->elem_size * + htab->map.max_entries); if (!htab->elems) return -ENOMEM; @@ -227,14 +227,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) goto free_htab; err = -ENOMEM; - htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), - GFP_USER | __GFP_NOWARN); - - if (!htab->buckets) { - htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket)); - if (!htab->buckets) - goto free_htab; - } + htab->buckets = bpf_map_area_alloc(htab->n_buckets * + sizeof(struct bucket)); + if (!htab->buckets) + goto free_htab; for (i = 0; i < htab->n_buckets; i++) { INIT_HLIST_HEAD(&htab->buckets[i].head); @@ -258,7 +254,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) free_extra_elems: free_percpu(htab->extra_elems); free_buckets: - kvfree(htab->buckets); + bpf_map_area_free(htab->buckets); free_htab: kfree(htab); return ERR_PTR(err); @@ -715,7 
+711,7 @@ static void htab_map_free(struct bpf_map *map) pcpu_freelist_destroy(&htab->freelist); } free_percpu(htab->extra_elems); - kvfree(htab->buckets); + bpf_map_area_free(htab->buckets); kfree(htab); } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 732ae16..be85191 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include "percpu_freelist.h" @@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; int err; - smap->elems = vzalloc(elem_size * smap->map.max_entries); + smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries); if (!smap->elems) return -ENOMEM; @@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) return 0; free_elems: - vfree(smap->elems); + bpf_map_area_free(smap->elems); return err; } @@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) return ERR_PTR(-E2BIG); - smap = kzalloc(cost, GFP_USER | __GFP_NOWARN); - if (!smap) { - smap = vzalloc(cost); - if (!smap) - return ERR_PTR(-ENOMEM); - } + smap = bpf_map_area_alloc(cost); + if (!smap) + return ERR_PTR(-ENOMEM); err = -E2BIG; cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); @@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) put_buffers: put_callchain_buffers(); free_smap: - kvfree(smap); + bpf_map_area_free(smap); return ERR_PTR(err); } @@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map) /* wait for bpf programs to complete before freeing stack map */ synchronize_rcu(); - vfree(smap->elems); + bpf_map_area_free(smap->elems); pcpu_freelist_destroy(&smap->freelist); - kvfree(smap); + bpf_map_area_free(smap); put_callchain_buffers(); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 237f3d6..72ea91d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -48,6 +50,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl) list_add(&tl->list_node, &bpf_map_types); } +void *bpf_map_area_alloc(size_t size) +{ + /* We definitely need __GFP_NORETRY, so OOM killer doesn't + * trigger under memory pressure as we really just want to + * fail instead. + */ + const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; + void *area; + + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { + area = kmalloc(size, GFP_USER | flags); + if (area != NULL) + return area; + } + + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags, + PAGE_KERNEL); +} + +void bpf_map_area_free(void *area) +{ + kvfree(area); +} + int bpf_map_precharge_memlock(u32 pages) { struct user_struct *user = get_current_user(); -- cgit v0.10.2 From c48a862c47d481838b26f5d6cd5c29e2064339da Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 18 Jan 2017 14:29:21 +0100 Subject: objtool: Fix IRET's opcode [ Upstream commit b5b46c4740aed1538544f0fa849c5b76c7823469 ] The IRET opcode is 0xcf according to the Intel manual and also to objdump of my vmlinux: 1ea8: 48 cf iretq Fix the opcode in arch_decode_instruction(). The previous value (0xc5) seems to correspond to LDS. Signed-off-by: Jiri Slaby Acked-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170118132921.19319-1-jslaby@suse.cz Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 5e0dea2..039636f 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, *type = INSN_RETURN; break; - case 0xc5: /* iret */ case 0xca: /* retf */ case 0xcb: /* retf */ + case 0xcf: /* iret */ *type = INSN_CONTEXT_SWITCH; break; -- cgit v0.10.2 From 83571e9ef7c91ef6e249aae374de068b30963551 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Jan 2017 19:44:42 -0800 Subject: gianfar: Do not reuse pages from emergency reserve [ Upstream commit 69fed99baac186013840ced3524562841296034f ] A driver using dev_alloc_page() must not reuse a page that had to use emergency memory reserve. Otherwise all packets using this page will be immediately dropped, unless for very specific sockets having SOCK_MEMALLOC bit set. This issue might be hard to debug, because only a fraction of the RX ring buffer would suffer from drops. Fixes: 75354148ce69 ("gianfar: Add paged allocation and Rx S/G") Signed-off-by: Eric Dumazet Cc: Claudiu Manoil Acked-by: Claudiu Manoil Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d391bee..3f4e711 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2951,7 +2951,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, } /* try reuse page */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) return false; /* change offset to the other half */ -- cgit v0.10.2 From e8b5068b64d0505fe138e3db243e6e3385ae1a15 Mon Sep 17 00:00:00 2001 From: Chandan Rajendra Date: Fri, 23 Dec 2016 15:00:18 +0530 Subject: Btrfs: Fix deadlock between direct IO and fast fsync [ Upstream commit 97dcdea076ecef41ea4aaa23d4397c2f622e4265 ] The following deadlock is seen when executing generic/113 test, ---------------------------------------------------------+---------------------------------------------------- Direct I/O task Fast fsync task ---------------------------------------------------------+---------------------------------------------------- btrfs_direct_IO __blockdev_direct_IO do_blockdev_direct_IO do_direct_IO btrfs_get_blocks_direct while (blocks needs to written) get_more_blocks (first iteration) btrfs_get_blocks_direct btrfs_create_dio_extent down_read(&BTRFS_I(inode) >dio_sem) Create and add extent map and ordered extent up_read(&BTRFS_I(inode) >dio_sem) btrfs_sync_file btrfs_log_dentry_safe btrfs_log_inode_parent btrfs_log_inode btrfs_log_changed_extents down_write(&BTRFS_I(inode) >dio_sem) Collect new extent maps and ordered extents wait for ordered extent completion get_more_blocks (second iteration) btrfs_get_blocks_direct btrfs_create_dio_extent down_read(&BTRFS_I(inode) >dio_sem) -------------------------------------------------------------------------------------------------------------- In the above description, Btrfs direct I/O code path has not yet started submitting bios for file range covered by the initial ordered extent. 
Meanwhile, The fast fsync task obtains the write semaphore and waits for I/O on the ordered extent to get completed. However, the Direct I/O task is now blocked on obtaining the read semaphore. To resolve the deadlock, this commit modifies the Direct I/O code path to obtain the read semaphore before invoking __blockdev_direct_IO(). The semaphore is then given up after __blockdev_direct_IO() returns. This allows the Direct I/O code to complete I/O on all the ordered extents it creates. Signed-off-by: Chandan Rajendra Reviewed-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bddbae7..cada3f9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7235,7 +7235,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, struct extent_map *em = NULL; int ret; - down_read(&BTRFS_I(inode)->dio_sem); if (type != BTRFS_ORDERED_NOCOW) { em = create_pinned_em(inode, start, len, orig_start, block_start, block_len, orig_block_len, @@ -7254,7 +7253,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, em = ERR_PTR(ret); } out: - up_read(&BTRFS_I(inode)->dio_sem); return em; } @@ -8707,6 +8705,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.unsubmitted_oe_range_start = (u64)offset; dio_data.unsubmitted_oe_range_end = (u64)offset; current->journal_info = &dio_data; + down_read(&BTRFS_I(inode)->dio_sem); } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags)) { inode_dio_end(inode); @@ -8719,6 +8718,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) iter, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, flags); if (iov_iter_rw(iter) == WRITE) { + up_read(&BTRFS_I(inode)->dio_sem); current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) { if (dio_data.reserve) -- cgit v0.10.2 From c3eab85ff11a8cd4def8cf2b4cc0610f6b47a8cd Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 1 Dec 2016 13:43:31 -0800 Subject: Btrfs: fix truncate down when no_holes feature is enabled [ Upstream commit 91298eec05cd8d4e828cf7ee5d4a6334f70cf69a ] For such a file mapping, [0-4k][hole][8k-12k] In NO_HOLES mode, we don't have the [hole] extent any more. Commit c1aa45759e90 ("Btrfs: fix shrinking truncate when the no_holes feature is enabled") fixed disk isize not being updated in NO_HOLES mode when data is not flushed. However, even if data has been flushed, we can still have trouble in updating disk isize since we updated disk isize to 'start' of the last evicted extent. Reviewed-by: Chris Mason Signed-off-by: Liu Bo Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cada3f9..a2a014b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4480,8 +4480,19 @@ search_again: if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) + if (item_end < new_size) { + /* + * With NO_HOLES mode, for the following mapping + * + * [0-4k][hole][8k-12k] + * + * if truncating isize down to 6k, it ends up + * isize being 8k. + */ + if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) + last_size = new_size; break; + } if (found_key.offset >= new_size) del_item = 1; else -- cgit v0.10.2 From 884baf2abf6dd0273b821a1f9e06023438528a52 Mon Sep 17 00:00:00 2001 From: "G. 
Campana" Date: Thu, 19 Jan 2017 23:37:46 +0200 Subject: virtio_console: fix a crash in config_work_handler [ Upstream commit 8379cadf71c3ee8173a1c6fc1ea7762a9638c047 ] Using control_work instead of config_work as the 3rd argument to container_of results in an invalid portdev pointer. Indeed, the work structure is initialized as below: INIT_WORK(&portdev->config_work, &config_work_handler); It leads to a crash when portdev->vdev is dereferenced later. This bug is triggered when the guest uses a virtio-console without multiport feature and receives a config_changed virtio interrupt. Signed-off-by: G. Campana Reviewed-by: Amit Shah Signed-off-by: Michael S. Tsirkin Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 471a301..8f890c1 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1870,7 +1870,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; -- cgit v0.10.2 From 72191c7d82e7a559ef05b1b89e6365911a8726aa Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Thu, 19 Jan 2017 10:39:09 -0800 Subject: swiotlb-xen: update dev_addr after swapping pages [ Upstream commit f1225ee4c8fcf09afaa199b8b1f0450f38b8cd11 ] In xen_swiotlb_map_page and xen_swiotlb_map_sg_attrs, if the original page is not suitable, we swap it for another page from the swiotlb pool. In these cases, we don't update the previously calculated dma address for the page before calling xen_dma_map_page. Thus, we end up calling xen_dma_map_page passing the wrong dev_addr, resulting in xen_dma_map_page mistakenly assuming that the page is foreign when it is local. Fix the bug by updating dev_addr appropriately. This change has no effect on x86, because xen_dma_map_page is a stub there. 
Signed-off-by: Stefano Stabellini Signed-off-by: Pooya Keshavarzi Tested-by: Pooya Keshavarzi Reviewed-by: Boris Ostrovsky Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 8e7a3d6..679f79f 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, if (map == SWIOTLB_MAP_ERROR) return DMA_ERROR_CODE; + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, size, dir, attrs); - dev_addr = xen_phys_to_bus(map); /* * Ensure that the address returned is DMA'ble @@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sgl) = 0; return 0; } + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, sg->length, dir, attrs); - sg->dma_address = xen_phys_to_bus(map); + sg->dma_address = dev_addr; } else { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed -- cgit v0.10.2 From 5d5c293af8348b540ef721d810f7549ac3ab81c2 Mon Sep 17 00:00:00 2001 From: Vineeth Remanan Pillai Date: Thu, 19 Jan 2017 08:35:39 -0800 Subject: xen-netfront: Fix Rx stall during network stress and OOM [ Upstream commit 90c311b0eeead647b708a723dbdde1eda3dcad05 ] During an OOM scenario, request slots could not be created as skb allocation fails. So the netback cannot pass in packets and netfront wrongly assumes that there is no more work to be done and it disables polling. This causes Rx to stall. The issue is with the retry logic which schedules the timer if the created slots are less than NET_RX_SLOTS_MIN. The count of new request slots to be pushed are calculated as a difference between new req_prod and rsp_cons which could be more than the actual slots, if there are unconsumed responses. The fix is to calculate the count of newly created slots as the difference between new req_prod and old req_prod. Signed-off-by: Vineeth Remanan Pillai Reviewed-by: Juergen Gross Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0cdcb21..599cf50 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx.req_prod_pvt = req_prod; /* Not enough requests? Try again later. */ - if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } -- cgit v0.10.2 From 322baf72eed51cef55a61f5d4ac1b51bd7824c1a Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Fri, 13 Jan 2017 12:48:06 -0500 Subject: scsi: virtio_scsi: Reject commands when virtqueue is broken [ Upstream commit 773c7220e22d193e5667c352fcbf8d47eefc817f ] In the case of a graceful set of detaches, where the virtio-scsi-ccw disk is removed from the guest prior to the controller, the guest behaves quite normally. Specifically, the detach gets us into sd_sync_cache to issue a Synchronize Cache(10) command, which immediately fails (and is retried a couple of times) because the device has been removed. 
Later, the removal of the controller sees two CRWs presented, but there's no further indication of the removal from the guest viewpoint. [ 17.217458] sd 0:0:0:0: [sda] Synchronizing SCSI cache [ 17.219257] sd 0:0:0:0: [sda] Synchronize Cache(10) failed: Result: hostbyte=DID_BAD_TARGET driverbyte=DRIVER_OK [ 21.449400] crw_info : CRW reports slct=0, oflw=0, chn=1, rsc=3, anc=0, erc=4, rsid=2 [ 21.449406] crw_info : CRW reports slct=0, oflw=0, chn=0, rsc=3, anc=0, erc=4, rsid=0 However, on s390, the SCSI disks can be removed "by surprise" when an entire controller (host) is removed and all associated disks are removed via the loop in scsi_forget_host. The same call to sd_sync_cache is made, but because the controller has already been removed, the Synchronize Cache(10) command is neither issued (and then failed) nor rejected. That the I/O isn't returned means the guest cannot have other devices added nor removed, and other tasks (such as shutdown or reboot) issued by the guest will not complete either. The virtio ring has already been marked as broken (via virtio_break_device in virtio_ccw_remove), but we still attempt to queue the command only to have it remain there. The calling sequence provides a bit of distinction for us: virtscsi_queuecommand() -> virtscsi_kick_cmd() -> virtscsi_add_cmd() -> virtqueue_add_sgs() -> virtqueue_add() if success return 0 elseif vq->broken or vring_mapping_error() return -EIO else return -ENOSPC A return of ENOSPC is generally a temporary condition, so returning "host busy" from virtscsi_queuecommand makes sense here, to have it redriven in a moment or two. But the EIO return code is more of a permanent error and so it would be wise to return the I/O itself and allow the calling thread to finish gracefully. The result is these four kernel messages in the guest (the fourth one does not occur prior to this patch): [ 22.921562] crw_info : CRW reports slct=0, oflw=0, chn=1, rsc=3, anc=0, erc=4, rsid=2 [ 22.921580] crw_info : CRW reports slct=0, oflw=0, chn=0, rsc=3, anc=0, erc=4, rsid=0 [ 22.921978] sd 0:0:0:0: [sda] Synchronizing SCSI cache [ 22.921993] sd 0:0:0:0: [sda] Synchronize Cache(10) failed: Result: hostbyte=DID_BAD_TARGET driverbyte=DRIVER_OK I opted to fill in the same response data that is returned from the more graceful device detach, where the disk device is removed prior to the controller device. Signed-off-by: Eric Farman Reviewed-by: Fam Zheng Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ec91bd0..c680d76 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + unsigned long flags; int req_size; + int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); @@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, req_size = sizeof(cmd->req.cmd); } - if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; + } return 0; } -- cgit v0.10.2 From e9a87e0f5bbb3f3fd28048b923b9941687c6233f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Jan 2017 14:22:24 -0800 Subject: iwlwifi: fix kernel crash when unregistering thermal zone [ Upstream commit 92549cdc288f47f3a98cf80ac5890c91f5876a06 ] A recent firmware change seems to have enabled thermal zones on the iwlwifi driver. Unfortunately, my device fails when registering the thermal zone. This doesn't stop the driver from attempting to unregister the thermal zone at unload time, triggering a NULL pointer deference in strlen() off the thermal_zone_device_unregister() path. Don't unregister if name is NULL, for that case we failed registering. Do the same for the cooling zone. Signed-off-by: Jens Axboe Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 63a051b..bec7d9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) return; IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); - thermal_zone_device_unregister(mvm->tz_device.tzone); - mvm->tz_device.tzone = NULL; + if (mvm->tz_device.tzone) { + thermal_zone_device_unregister(mvm->tz_device.tzone); + mvm->tz_device.tzone = NULL; + } } static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) @@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) return; IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); - thermal_cooling_device_unregister(mvm->cooling_dev.cdev); - mvm->cooling_dev.cdev = NULL; + if (mvm->cooling_dev.cdev) { + thermal_cooling_device_unregister(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; + } } #endif /* CONFIG_THERMAL */ -- cgit v0.10.2 From e99d86d76eed4f4bccc01e58e0bb3c96fbe88f67 Mon Sep 17 00:00:00 2001 From: Zach Ploskey Date: Sun, 22 Jan 2017 00:47:19 -0800 Subject: platform/x86: ideapad-laptop: handle ACPI event 1 [ Upstream commit cfee5d63767b2e7997c1f36420d008abbe61565c ] On Ideapad laptops, ACPI event 1 is currently not handled. Many models log "ideapad_laptop: Unknown event: 1" every 20 seconds or so while running on battery power. Some convertible laptops receive this event when switching in and out of tablet mode. 
This adds and additional case for event 1 in ideapad_acpi_notify to call ideapad_input_report(priv, vpc_bit), so that the event is reported to userspace and we avoid unnecessary logging. Fixes bug #107481 (https://bugzilla.kernel.org/show_bug.cgi?id=107481) Fixes bug #65751 (https://bugzilla.kernel.org/show_bug.cgi?id=65751) Signed-off-by: Zach Ploskey Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index a7614fc..2f1615e 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) case 8: case 7: case 6: + case 1: ideapad_input_report(priv, vpc_bit); break; case 5: -- cgit v0.10.2 From c6f284899e01f9ea095d0e5d7aa2f3814915def1 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Fri, 20 Jan 2017 12:14:13 -0600 Subject: amd-xgbe: Check xgbe_init() return code [ Upstream commit 738f7f647371ff4cfc9646c99dba5b58ad142db3 ] The xgbe_init() routine returns a return code indicating success or failure, but the return code is not checked. Add code to xgbe_init() to issue a message when failures are seen and add code to check the xgbe_init() return code. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index ca106d4..3424435 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2825,8 +2825,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) /* Flush Tx queues */ ret = xgbe_flush_tx_queues(pdata); - if (ret) + if (ret) { + netdev_err(pdata->netdev, "error flushing TX queues\n"); return ret; + } /* * Initialize DMA related features diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0f0f3014..1e4e8b2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_start\n"); - hw_if->init(pdata); + ret = hw_if->init(pdata); + if (ret) + return ret; ret = phy_if->phy_start(pdata); if (ret) -- cgit v0.10.2 From e5a2ba9af818cf214f2a0a1e431fb2b1102883c0 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 16:05:05 -0800 Subject: net: dsa: Check return value of phy_connect_direct() [ Upstream commit 4078b76cac68e50ccf1f76a74e7d3d5788aec3fe ] We need to check the return value of phy_connect_direct() in dsa_slave_phy_connect() otherwise we may be continuing the initialization of a slave network device with a PHY that already attached somewhere else and which will soon be in error because the PHY device is in error. The conditions for such an error to occur are that we have a port of our switch that is not disabled, and has the same port number as a PHY address (say both 5) that can be probed using the DSA slave MII bus. We end-up having this slave network device find a PHY at the same address as our port number, and we try to attach to it. A slave network (e.g: port 0) has already attached to our PHY device, and we try to re-attach it with a different network device, but since we ignore the error we would end-up initializating incorrect device references by the time the slave network interface is opened. 
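A minimal sketch of the corrected helper, condensed from the diff below; the point is simply that the phy_connect_direct() return value is propagated to the caller instead of being dropped:

        /* dsa_slave_phy_connect(), simplified */
        if (p->phy_interface == PHY_INTERFACE_MODE_NA)
                p->phy_interface = p->phy->interface;

        return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
                                  p->phy_interface);
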
The code has been (re)organized several times, making it hard to provide an exact Fixes tag, this is a bugfix nonetheless. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 3ff9d97..079d76b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1103,10 +1103,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, /* Use already configured phy mode */ if (p->phy_interface == PHY_INTERFACE_MODE_NA) p->phy_interface = p->phy->interface; - phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, - p->phy_interface); - - return 0; + return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); } static int dsa_slave_phy_setup(struct dsa_slave_priv *p, -- cgit v0.10.2 From 8895ef4e5357fa54e614c5654eb4416623c2feb6 Mon Sep 17 00:00:00 2001 From: Ding Pixel Date: Wed, 18 Jan 2017 17:26:38 +0800 Subject: drm/amdgpu: check ring being ready before using MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c5f21c9f878b8dcd54d0b9739c025ca73cb4c091 ] Return success when the ring is properly initialized, otherwise return failure. Tonga SRIOV VF doesn't have UVD and VCE engines, the initialization of these IPs is bypassed. The system crashes if application submit IB to their rings which are not ready to use. It could be a common issue if IP having ring buffer is disabled for some reason on specific ASIC, so it should check the ring being ready to use. Bug: amdgpu_test crashes system on Tonga VF. Signed-off-by: Ding Pixel Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82dc8d2..bfb4b91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } -- cgit v0.10.2 From ff3b1dd026bb1f9df6f345ec91b9a754d363306f Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 24 Jan 2017 17:50:26 +0100 Subject: vfio/spapr: fail tce_iommu_attach_group() when iommu_data is null [ Upstream commit bd00fdf198e2da475a2f4265a83686ab42d998a8 ] The recently added mediated VFIO driver doesn't know about powerpc iommu. It thus doesn't register a struct iommu_table_group in the iommu group upon device creation. The iommu_data pointer hence remains null. This causes a kernel oops when userspace tries to set the iommu type of a container associated with a mediated device to VFIO_SPAPR_TCE_v2_IOMMU. [ 82.585440] mtty mtty: MDEV: Registered [ 87.655522] iommu: Adding device 83b8f4f2-509f-382f-3c1e-e6bfe0fa1001 to group 10 [ 87.655527] vfio_mdev 83b8f4f2-509f-382f-3c1e-e6bfe0fa1001: MDEV: group_id = 10 [ 116.297184] Unable to handle kernel paging request for data at address 0x00000030 [ 116.297389] Faulting instruction address: 0xd000000007870524 [ 116.297465] Oops: Kernel access of bad area, sig: 11 [#1] [ 116.297611] SMP NR_CPUS=2048 [ 116.297611] NUMA [ 116.297627] PowerNV ... 
[ 116.297954] CPU: 33 PID: 7067 Comm: qemu-system-ppc Not tainted 4.10.0-rc5-mdev-test #8 [ 116.297993] task: c000000e7718b680 task.stack: c000000e77214000 [ 116.298025] NIP: d000000007870524 LR: d000000007870518 CTR: 0000000000000000 [ 116.298064] REGS: c000000e77217990 TRAP: 0300 Not tainted (4.10.0-rc5-mdev-test) [ 116.298103] MSR: 9000000000009033 [ 116.298107] CR: 84004444 XER: 00000000 [ 116.298154] CFAR: c00000000000888c DAR: 0000000000000030 DSISR: 40000000 SOFTE: 1 GPR00: d000000007870518 c000000e77217c10 d00000000787b0ed c000000eed2103c0 GPR04: 0000000000000000 0000000000000000 c000000eed2103e0 0000000f24320000 GPR08: 0000000000000104 0000000000000001 0000000000000000 d0000000078729b0 GPR12: c00000000025b7e0 c00000000fe08400 0000000000000001 000001002d31d100 GPR16: 000001002c22c850 00003ffff315c750 0000000043145680 0000000043141bc0 GPR20: ffffffffffffffed fffffffffffff000 0000000020003b65 d000000007706018 GPR24: c000000f16cf0d98 d000000007706000 c000000003f42980 c000000003f42980 GPR28: c000000f1575ac00 c000000003f429c8 0000000000000000 c000000eed2103c0 [ 116.298504] NIP [d000000007870524] tce_iommu_attach_group+0x10c/0x360 [vfio_iommu_spapr_tce] [ 116.298555] LR [d000000007870518] tce_iommu_attach_group+0x100/0x360 [vfio_iommu_spapr_tce] [ 116.298601] Call Trace: [ 116.298610] [c000000e77217c10] [d000000007870518] tce_iommu_attach_group+0x100/0x360 [vfio_iommu_spapr_tce] (unreliable) [ 116.298671] [c000000e77217cb0] [d0000000077033a0] vfio_fops_unl_ioctl+0x278/0x3e0 [vfio] [ 116.298713] [c000000e77217d40] [c0000000002a3ebc] do_vfs_ioctl+0xcc/0x8b0 [ 116.298745] [c000000e77217de0] [c0000000002a4700] SyS_ioctl+0x60/0xc0 [ 116.298782] [c000000e77217e30] [c00000000000b220] system_call+0x38/0xfc [ 116.298812] Instruction dump: [ 116.298828] 7d3f4b78 409effc8 3d220000 e9298020 3c800140 38a00018 608480c0 e8690028 [ 116.298869] 4800249d e8410018 7c7f1b79 41820230 2fa90000 419e0114 e9090020 [ 116.298914] ---[ end trace 1e10b0ced08b9120 ]--- This patch fixes the oops. Reported-by: Vaibhav Jain Signed-off-by: Greg Kurz Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 79ddcb0..85d3e64 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1292,6 +1292,10 @@ static int tce_iommu_attach_group(void *iommu_data, /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", iommu_group_id(iommu_group), iommu_group); */ table_group = iommu_group_get_iommudata(iommu_group); + if (!table_group) { + ret = -ENODEV; + goto unlock_exit; + } if (tce_groups_attached(container) && (!table_group->ops || !table_group->ops->take_ownership || -- cgit v0.10.2 From a6c3e01bf32e82494fb634801982e31f257f25cc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 23 Jan 2017 11:11:42 +0100 Subject: mlxsw: spectrum_router: Correctly reallocate adjacency entries [ Upstream commit a59b7e0246774e28193126fe7fdbbd0ae9c67dcc ] mlxsw_sp_nexthop_group_mac_update() is called in one of two cases: 1) When the MAC of a nexthop needs to be updated 2) When the size of a nexthop group has changed In the second case the adjacency entries for the nexthop group need to be reallocated from the adjacency table. In this case we must write to the entries the MAC addresses of all the nexthops that should be offloaded and not only those whose MAC changed. Otherwise, these entries would be filled with garbage data, resulting in packet loss. 
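Concretely, the fix threads a "reallocate" hint into the per-nexthop loop so that a freshly allocated adjacency block is always written in full, not only the entries whose neighbour MAC changed; condensed from the diff below:

        /* mlxsw_sp_nexthop_group_mac_update(), per-nexthop body */
        if (nh->update || reallocate) {
                err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, adj_index, nh);
                if (err)
                        return err;
        }
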
Fixes: a7ff87acd995 ("mlxsw: spectrum_router: Implement next-hop routing") Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6905630..9e31a33 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1178,7 +1178,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, static int mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) + struct mlxsw_sp_nexthop_group *nh_grp, + bool reallocate) { u32 adj_index = nh_grp->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; @@ -1193,7 +1194,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, continue; } - if (nh->update) { + if (nh->update || reallocate) { err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, adj_index, nh); if (err) @@ -1254,7 +1255,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* Nothing was added or removed, so no need to reallocate. Just * update MAC on existing adjacency indexes. */ - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, + false); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -1282,7 +1284,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, nh_grp->adj_index_valid = 1; nh_grp->adj_index = adj_index; nh_grp->ecmp_size = ecmp_size; - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; -- cgit v0.10.2 From 7fdc81f6e1a9b3f520e40cfc4ebccc94858da62d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 23 Jan 2017 21:37:52 +0200 Subject: virtio_net: fix PAGE_SIZE > 64k [ Upstream commit d0fa28f00052391b5df328f502fbbdd4444938b7 ] I don't have any guests with PAGE_SIZE > 64k but the code seems to be clearly broken in that case as PAGE_SIZE / MERGEABLE_BUFFER_ALIGN will need more than 8 bit and so the code in mergeable_ctx_to_buf_address does not give us the actual true size. Cc: John Fastabend Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7ca9989..1568aed 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -47,8 +47,16 @@ module_param(gso, bool, 0444); */ DECLARE_EWMA(pkt_len, 1, 64) +/* With mergeable buffers we align buffer address and use the low bits to + * encode its true size. Buffer size is up to 1 page so we need to align to + * square root of page size to ensure we reserve enough bits to encode the true + * size. + */ +#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) + /* Minimum alignment for mergeable packet buffers. 
*/ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ + 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) #define VIRTNET_DRIVER_VERSION "1.0.0" -- cgit v0.10.2 From b07bf2364605dc7d78401b7eb02a533b0b6ddc05 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Jan 2017 16:43:05 -0800 Subject: ip6_tunnel: must reload ipv6h in ip6ip6_tnl_xmit() [ Upstream commit 21b995a9cb093fff33ec91d7cb3822b882a90a1e ] Since ip6_tnl_parse_tlv_enc_lim() can call pskb_may_pull(), we must reload any pointer that was related to skb->head (or skb->data), or risk use after free. Fixes: c12b395a4664 ("gre: Support GRE over IPv6") Signed-off-by: Eric Dumazet Cc: Dmitry Kozlov Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0a59220..a5fdc1a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) return -1; offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ + ipv6h = ipv6_hdr(skb); + if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 116b4da..63fad24 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1313,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowlabel = key->label; } else { offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ + ipv6h = ipv6_hdr(skb); if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; -- cgit v0.10.2 From 32bd4d2ed9d8355edc2263947286c8039c6bf171 Mon Sep 17 00:00:00 2001 From: Balakrishnan Raman Date: Mon, 23 Jan 2017 20:44:33 -0800 Subject: vxlan: do not age static remote mac entries [ Upstream commit efb5f68f32995c146944a9d4257c3cf8eae2c4a1 ] Mac aging is applicable only for dynamically learnt remote mac entries. Check for user configured static remote mac entries and skip aging. Signed-off-by: Balakrishnan Raman Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 55c4408..963e533 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2285,7 +2285,7 @@ static void vxlan_cleanup(unsigned long arg) = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; - if (f->state & NUD_PERMANENT) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) continue; timeout = f->used + vxlan->cfg.age_interval * HZ; -- cgit v0.10.2 From 00f468f51dd5182390b4e859dced75f22e89034e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 24 Jan 2017 07:28:41 +0100 Subject: ibmveth: Add a proper check for the availability of the checksum features [ Upstream commit 23d28a859fb847fd7fcfbd31acb3b160abb5d6ae ] When using the ibmveth driver in a KVM/QEMU based VM, it currently always prints out a scary error message like this when it is started: ibmveth 71000003 (unregistered net_device): unable to change checksum offload settings. 1 rc=-2 ret_attr=71000003 This happens because the driver always tries to enable the checksum offloading without checking for the availability of this feature first. 
QEMU does not support checksum offloading for the spapr-vlan device, thus we always get the error message here. According to the LoPAPR specification, the "ibm,illan-options" property of the corresponding device tree node should be checked first to see whether the H_ILLAN_ATTRIUBTES hypercall and thus the checksum offloading feature is available. Thus let's do this in the ibmveth driver, too, so that the error message is really only limited to cases where something goes wrong, and does not occur if the feature is just missing. Signed-off-by: Thomas Huth Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 03dca73..b375ae9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1604,8 +1604,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); - netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_features = NETIF_F_SG; + if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + } netdev->features |= netdev->hw_features; -- cgit v0.10.2 From c5c8743642aee21300b99540643549054edbf17f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 24 Jan 2017 15:18:29 -0800 Subject: kernel/panic.c: add missing \n [ Upstream commit ff7a28a074ccbea999dadbb58c46212cf90984c6 ] When a system panics, the "Rebooting in X seconds.." message is never printed because it lacks a new line. Fix it. Link: http://lkml.kernel.org/r/20170119114751.2724-1-jslaby@suse.cz Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/panic.c b/kernel/panic.c index e6480e2..dbec387 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -249,7 +249,7 @@ void panic(const char *fmt, ...) * Delay timeout seconds before rebooting the machine. * We can't use the "normal" timers since we just panicked. */ - pr_emerg("Rebooting in %d seconds..", panic_timeout); + pr_emerg("Rebooting in %d seconds..\n", panic_timeout); for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); -- cgit v0.10.2 From 2bc8fcd633d8e7d59a242eb4d86fbebb8cf7ff61 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 25 Jan 2017 09:20:54 +0100 Subject: Documentation: devicetree: change the mediatek ethernet compatible string [ Upstream commit 61976fff20f92aceecc3670f6168bfc57a79e047 ] When the binding was defined, I was not aware that mt2701 was an earlier version of the SoC. For sake of consistency, the ethernet driver should use mt2701 inside the compat string as this is the earliest SoC with the ethernet core. The ethernet driver is currently of no real use until we finish and upstream the DSA driver. There are no users of this binding yet. It should be safe to fix this now before it is too late and we need to provide backward compatibility for the mt7623-eth compat string. Reported-by: Sean Wang Signed-off-by: John Crispin Reviewed-by: Matthias Brugger Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt index c010faf..c7194e8 100644 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt @@ -7,7 +7,7 @@ have dual GMAC each represented by a child node.. * Ethernet controller node Required properties: -- compatible: Should be "mediatek,mt7623-eth" +- compatible: Should be "mediatek,mt2701-eth" - reg: Address and length of the register set for the device - interrupts: Should contain the three frame engines interrupts in numeric order. These are fe_int0, fe_int1 and fe_int2. -- cgit v0.10.2 From b8c5e7b1241362a131a2364fd166f8c8fdd9b363 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Mon, 12 Dec 2016 16:15:17 +0100 Subject: drm/etnaviv: trick drm_mm into giving out a low IOVA [ Upstream commit 3546fb0cdac25a79c89d87020566fab52b92867d ] After rollover of the IOVA space, we want to get a low IOVA address, otherwise the the games we play by remembering the last IOVA are pointless. When we search for a free hole with DRM_MM_SEARCH_DEFAULT, drm_mm will pop the next entry from the free holes stack, which will likely be a high IOVA. By using DRM_MM_SEARCH_BELOW we can trick drm_mm into reversing the search and provide us with a low IOVA. Signed-off-by: Lucas Stach Reviewed-by: Wladimir van der Laan Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96..fe0e85b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, struct list_head list; bool found; + /* + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick + * drm_mm into giving out a low IOVA after address space + * rollover. This needs a proper fix. + */ ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, mmu->last_iova, ~0UL, - DRM_MM_SEARCH_DEFAULT); + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); if (ret != -ENOSPC) break; -- cgit v0.10.2 From 3a6edbc95ba0df871e1eb72a411c0fa06644785e Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Thu, 5 Jan 2017 10:09:25 -0500 Subject: perf/x86/intel/uncore: Fix hardcoded socket 0 assumption in the Haswell init code [ Upstream commit 6d6daa20945f3f598e56e18d1f926c08754f5801 ] hswep_uncore_cpu_init() uses a hardcoded physical package id 0 for the boot cpu. This works as long as the boot CPU is actually on the physical package 0, which is normaly the case after power on / reboot. But it fails with a NULL pointer dereference when a kdump kernel is started on a secondary socket which has a different physical package id because the locigal package translation for physical package 0 does not exist. Use the logical package id of the boot cpu instead of hard coded 0. [ tglx: Rewrote changelog once more ] Fixes: cf6d445f6897 ("perf/x86/uncore: Track packages, not per CPU data") Signed-off-by: Prarit Bhargava Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: H. 
Peter Anvin Cc: Harish Chegondi Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1483628965-2890-1-git-send-email-prarit@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 2724277..afe8024 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = { void hswep_uncore_cpu_init(void) { - int pkg = topology_phys_to_logical_pkg(0); + int pkg = boot_cpu_data.logical_proc_id; if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; -- cgit v0.10.2 From 849f2d0665e049c21dbac8c0fa566a8ac04fead5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 2 Jan 2017 14:07:22 +0200 Subject: pinctrl: intel: Set pin direction properly [ Upstream commit 17fab473693e8357a9aa6fee4fbed6c13a34bd81 ] There are two bits in the PADCFG0 register to configure direction, one per TX/RX buffers. For now we wrongly assume that the GPIO is always requested before it is being used, which is not true when the GPIO is used through irqchip. In this case the GPIO is never requested and we never enable RX buffer for it. Fix this by setting both bits accordingly. Reported-by: Jarkko Nikula Signed-off-by: Andy Shevchenko Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 0144376..b40a074 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, return 0; } +static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) +{ + u32 value; + + value = readl(padcfg0); + if (input) { + value &= ~PADCFG0_GPIORXDIS; + value |= PADCFG0_GPIOTXDIS; + } else { + value &= ~PADCFG0_GPIOTXDIS; + value |= PADCFG0_GPIORXDIS; + } + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - /* Disable TX buffer and enable RX (this will be input) */ - value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; writel(value, padcfg0); + /* Disable TX buffer and enable RX (this will be input) */ + __intel_gpio_set_direction(padcfg0, true); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - - value = readl(padcfg0); - if (input) - value |= PADCFG0_GPIOTXDIS; - else - value &= ~PADCFG0_GPIOTXDIS; - writel(value, padcfg0); + __intel_gpio_set_direction(padcfg0, input); raw_spin_unlock_irqrestore(&pctrl->lock, flags); -- cgit v0.10.2 From d48cb21fd50bf6bea379ad04dc2baced20cf5275 
Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Jan 2017 23:13:45 +0000 Subject: net: phy: marvell: fix Marvell 88E1512 used in SGMII mode [ Upstream commit a13c06525ab9ff442924e67df9393a5efa914c56 ] When an Marvell 88E1512 PHY is connected to a nic in SGMII mode, the fiber page is used for the SGMII host-side connection. The PHY driver notices that SUPPORTED_FIBRE is set, so it tries reading the fiber page for the link status, and ends up reading the MAC-side status instead of the outgoing (copper) link. This leads to incorrect results reported via ethtool. If the PHY is connected via SGMII to the host, ignore the fiber page. However, continue to allow the existing power management code to suspend and resume the fiber page. Fixes: 6cfb3bcc0641 ("Marvell phy: check link status in case of fiber link.") Signed-off-by: Russell King Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2f70f80..c60c147 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1200,7 +1200,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE) { + if (phydev->supported & SUPPORTED_FIBRE && + phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); if (err < 0) goto error; -- cgit v0.10.2 From 6130fac994818eb0fbc9dfc95056292e71fb3791 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 08:52:50 +0200 Subject: mac80211: recalculate min channel width on VHT opmode changes [ Upstream commit d2941df8fbd9708035d66d889ada4d3d160170ce ] When an associated station changes its VHT operating mode this can/will affect the bandwidth it's using, and consequently we must recalculate the minimum bandwidth we need to use. 
Failure to do so can lead to one of two scenarios: 1) we use a too high bandwidth, this is benign 2) we use a too narrow bandwidth, causing rate control and actual PHY configuration to be out of sync, which can in turn cause problems/crashes Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8d7747e..37bec0f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -6,6 +6,7 @@ * Copyright (c) 2006 Jiri Benc * Copyright 2008, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1307,6 +1308,26 @@ static void ieee80211_iface_work(struct work_struct *work) } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_VHT) { switch (mgmt->u.action.u.vht_group_notif.action_code) { + case WLAN_VHT_ACTION_OPMODE_NOTIF: { + struct ieee80211_rx_status *status; + enum nl80211_band band; + u8 opmode; + + status = IEEE80211_SKB_RXCB(skb); + band = status->band; + opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + + if (sta) + ieee80211_vht_handle_opmode(sdata, sta, + opmode, + band); + + mutex_unlock(&local->sta_mtx); + break; + } case WLAN_VHT_ACTION_GROUPID_MGMT: ieee80211_process_mu_groups(sdata, mgmt); break; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c45a0fc..439e597 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2923,17 +2923,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.u.vht_opmode_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { - u8 opmode; - /* verify opmode is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 2) goto invalid; - - opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; - - ieee80211_vht_handle_opmode(rx->sdata, rx->sta, - opmode, status->band); - goto handled; + goto queue; } case WLAN_VHT_ACTION_GROUPID_MGMT: { if (len < IEEE80211_MIN_ACTION_SIZE + 25) diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 6832bf6..43e45bb 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); - if (changed > 0) + if (changed > 0) { + ieee80211_recalc_min_chandef(sdata); rate_control_rate_update(local, sband, sta, changed); + } } void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, -- cgit v0.10.2 From c78b8de5c05c73ff451b7c5a085766b421920ccd Mon Sep 17 00:00:00 2001 From: Colin King Date: Wed, 11 Jan 2017 11:43:10 +0000 Subject: perf/x86/intel: Use ULL constant to prevent undefined shift behaviour [ Upstream commit ad5013d5699d30ded0cdbbc68b93b2aa28222c6e ] When x86_pmu.num_counters is 32 the shift of the integer constant 1 is exceeding 32bit and therefor undefined behaviour. Fix this by shifting 1ULL instead of 1. 
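As a side note on the undefined shift described in that changelog, a minimal user-space sketch (identifiers are illustrative, not the kernel's) shows why the width of the shifted constant matters once the counter count reaches 32:

#include <stdint.h>
/* With a plain int constant the shift count can equal the type width,
 * which is undefined behaviour in C; widening the constant first keeps
 * the result well defined. */
static uint64_t ctrl_mask(unsigned int num_counters)
{
        /* return (1 << num_counters) - 1;    undefined when num_counters == 32 */
        return (1ULL << num_counters) - 1;    /* 0xffffffff for 32 counters */
}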
Reported-by: CoverityScan CID#1192105 ("Bad bit shift operation") Signed-off-by: Colin Ian King Cc: Andi Kleen Cc: Peter Zijlstra Cc: Kan Liang Cc: Stephane Eranian Cc: Alexander Shishkin Link: http://lkml.kernel.org/r/20170111114310.17928-1-colin.king@canonical.com Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 24a6cd2..f0f197f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3978,7 +3978,7 @@ __init int intel_pmu_init(void) x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC; } - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; + x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1; if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) { WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", -- cgit v0.10.2 From 582c1ca0ea1d13a9e2912c5a7530f0728b3c3d1c Mon Sep 17 00:00:00 2001 From: Brendan McGrath Date: Sat, 7 Jan 2017 08:01:38 +1100 Subject: HID: i2c-hid: Add sleep between POWER ON and RESET [ Upstream commit a89af4abdf9b353cdd6f61afc0eaaac403304873 ] Support for the Asus Touchpad was recently added. It turns out this device can fail initialisation (and become unusable) when the RESET command is sent too soon after the POWER ON command. Unfortunately the i2c-hid specification does not specify the need for a delay between these two commands. But it was discovered the Windows driver has a 1ms delay. As a result, this patch modifies the i2c-hid module to add a sleep inbetween the POWER ON and RESET commands which lasts between 1ms and 5ms. See https://github.com/vlasenko/hid-asus-dkms/issues/24 for further details. Signed-off-by: Brendan McGrath Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index b1bce80..8008e06 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -427,6 +427,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. + */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); -- cgit v0.10.2 From c32462d0b5232712f8a2a1d6cedb731115ba6f7b Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 11 Jan 2017 11:06:42 +0100 Subject: scsi: lpfc: avoid double free of resource identifiers [ Upstream commit cd60be4916ae689387d04b86b6fc15931e4c95ae ] Set variables initialized in lpfc_sli4_alloc_resource_identifiers() to NULL if an error occurred. Otherwise, lpfc_sli4_driver_resource_unset() attempts to free the memory again. Signed-off-by: Roberto Sassu Signed-off-by: Johannes Thumshirn Acked-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 49b4c79..2d4f4b5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5951,18 +5951,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } -- cgit v0.10.2 From aabb797b4c1204b2e8518538b2616e476f2bac92 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 11 Jan 2017 18:18:40 -0800 Subject: spi: davinci: use dma_mapping_error() [ Upstream commit c5a2a394835f473ae23931eda5066d3771d7b2f8 ] The correct error checking for dma_map_single() is to use dma_mapping_error(). Signed-off-by: Kevin Hilman Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index d36c11b..02fb967 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, !t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } -- cgit v0.10.2 From f88f06e1831878ecdd5fa78090a45ea8ff77f38f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2017 14:54:53 +0000 Subject: arm64: assembler: make adr_l work in modules under KASLR [ Upstream commit 41c066f2c4d436c535616fe182331766c57838f0 ] When CONFIG_RANDOMIZE_MODULE_REGION_FULL=y, the offset between loaded modules and the core kernel may exceed 4 GB, putting symbols exported by the core kernel out of the reach of the ordinary adrp/add instruction pairs used to generate relative symbol references. So make the adr_l macro emit a movz/movk sequence instead when executing in module context. While at it, remove the pointless special case for the stack pointer. Acked-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 28bfe61..851290d 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -155,22 +155,25 @@ lr .req x30 // link register /* * Pseudo-ops for PC-relative adr/ldr/str , where - * is within the range +/- 4 GB of the PC. + * is within the range +/- 4 GB of the PC when running + * in core kernel context. In module context, a movz/movk sequence + * is used, since modules may be loaded far away from the kernel + * when KASLR is in effect. 
*/ /* * @dst: destination register (64 bit wide) * @sym: name of the symbol - * @tmp: optional scratch register to be used if == sp, which - * is not allowed in an adrp instruction */ - .macro adr_l, dst, sym, tmp= - .ifb \tmp + .macro adr_l, dst, sym +#ifndef MODULE adrp \dst, \sym add \dst, \dst, :lo12:\sym - .else - adrp \tmp, \sym - add \dst, \tmp, :lo12:\sym - .endif +#else + movz \dst, #:abs_g3:\sym + movk \dst, #:abs_g2_nc:\sym + movk \dst, #:abs_g1_nc:\sym + movk \dst, #:abs_g0_nc:\sym +#endif .endm /* @@ -181,6 +184,7 @@ lr .req x30 // link register * the address */ .macro ldr_l, dst, sym, tmp= +#ifndef MODULE .ifb \tmp adrp \dst, \sym ldr \dst, [\dst, :lo12:\sym] @@ -188,6 +192,15 @@ lr .req x30 // link register adrp \tmp, \sym ldr \dst, [\tmp, :lo12:\sym] .endif +#else + .ifb \tmp + adr_l \dst, \sym + ldr \dst, [\dst] + .else + adr_l \tmp, \sym + ldr \dst, [\tmp] + .endif +#endif .endm /* @@ -197,8 +210,13 @@ lr .req x30 // link register * while needs to be preserved. */ .macro str_l, src, sym, tmp +#ifndef MODULE adrp \tmp, \sym str \src, [\tmp, :lo12:\sym] +#else + adr_l \tmp, \sym + str \src, [\tmp] +#endif .endm /* -- cgit v0.10.2 From 4ae8dc6acb710419c8766c290b7fb5eac2f1ed68 Mon Sep 17 00:00:00 2001 From: Vadim Lomovtsev Date: Thu, 12 Jan 2017 07:28:06 -0800 Subject: net: thunderx: acpi: fix LMAC initialization [ Upstream commit 7aa4865506a26c607e00bd9794a85785b55ebca7 ] While probing BGX we requesting appropriate QLM for it's configuration and get LMAC count by that request. Then, while reading configured MAC values from SSDT table we need to save them in proper mapping: BGX[i]->lmac[j].mac = to later provide for initialization stuff. In order to fill such mapping properly we need to add lmac index to be used while acpi initialization since at this moment bgx->lmac_count already contains actual value. Signed-off-by: Vadim Lomovtsev Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 679679a..e858b1a 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -48,8 +48,9 @@ struct lmac { struct bgx { u8 bgx_id; struct lmac lmac[MAX_LMAC_PER_BGX]; - int lmac_count; + u8 lmac_count; u8 max_lmac; + u8 acpi_lmac_idx; void __iomem *reg_base; struct pci_dev *pdev; bool is_dlm; @@ -1159,13 +1160,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; + bgx->acpi_lmac_idx++; /* move to next LMAC */ out: - bgx->lmac_count++; return AE_OK; } -- cgit v0.10.2 From 77e82094a3c9d3ca8308a48a4b11037c6234a262 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Dec 2016 16:35:50 -0500 Subject: drm/radeon/si: load special ucode for certain MC configs [ Upstream commit ef736d394e85b1bf1fd65ba5e5257b85f6c82325 ] Special MC ucode is required for these memory configurations. 
Acked-by: Edward O'Callaghan Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3333e8a..b75d809 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -115,6 +115,8 @@ MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/si58_mc.bin"); + static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); @@ -1650,6 +1652,7 @@ static int si_init_microcode(struct radeon_device *rdev) int err; int new_fw = 0; bool new_smc = false; + bool si58_fw = false; DRM_DEBUG("\n"); @@ -1742,6 +1745,10 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } + /* this memory configuration requires special firmware */ + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + si58_fw = true; + DRM_INFO("Loading %s Microcode\n", new_chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); @@ -1845,7 +1852,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + if (si58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); -- cgit v0.10.2 From f275ac7fc5d2b6013980864f14d1ced016211349 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 19:26:49 +0800 Subject: drm/amd/powerplay: fix vce cg logic error on CZ/St. [ Upstream commit 3731d12dce83d47b357753ffc450ce03f1b49688 ] can fix Bug 191281: vce ib test failed. when vce idle, set vce clock gate, so the clock in vce domain will be disabled. when need to encode, disable vce clock gate, enable the clocks to vce engine. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 2028980..5b261c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_CG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; -- cgit v0.10.2 From 25319ae8e8a72a3fcdac7c964d267ca3c4e7c0a0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 15:47:50 +0800 Subject: drm/amd/powerplay: refine vce dpm update code on Cz. [ Upstream commit ab8db87b8256e13a62f10af1d32f5fc233c398cc ] Program HardMin based on the vce_arbiter.ecclk if ecclk is 0, disable ECLK DPM 0. 
Otherwise VCE could hang if switching SCLK from DPM 0 to 6/7 Signed-off-by: Rex Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 9604249..189ec94 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk, PPSMC_MSG_SetEclkHardMin)); } else { - /*EPR# 419220 -HW limitation to to */ - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(hwmgr, - cz_hwmgr->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - + /*Program HardMin based on the vce_arbiter.ecclk */ + if (hwmgr->vce_arbiter.ecclk == 0) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, 0); + /* disable ECLK DPM 0. Otherwise VCE could hang if + * switching SCLK from DPM 0 to 6/7 */ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkSoftMin, 1); + } else { + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } } return 0; } -- cgit v0.10.2 From 8eaaf66d41adf7b9b31486f03d93de3a1013e28d Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Jan 2017 10:05:46 +0000 Subject: pmem: return EIO on read_pmem() failure [ Upstream commit d47d1d27fd6206c18806440f6ebddf51a806be4f ] The read_pmem() function uses memcpy_mcsafe() on x86 where an EFAULT error code indicates a failed read. Block I/O should use EIO to indicate failure. Other pmem code paths (like bad blocks) already use EIO so let's be consistent. This fixes compatibility with consumers like btrfs that try to parse the specific error code rather than treat all errors the same. Reviewed-by: Jeff Moyer Signed-off-by: Stefan Hajnoczi Signed-off-by: Dan Williams Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b480859..3456f53 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -87,7 +87,9 @@ static int read_pmem(struct page *page, unsigned int off, rc = memcpy_from_pmem(mem + off, pmem_addr, len); kunmap_atomic(mem); - return rc; + if (rc) + return -EIO; + return 0; } static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, -- cgit v0.10.2 From 6baa8c92dab9a43f0b363f1b7d7bd269d5efcf8d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 13 Jan 2017 11:28:25 +0100 Subject: mac80211: initialize SMPS field in HT capabilities [ Upstream commit 43071d8fb3b7f589d72663c496a6880fb097533c ] ibss and mesh modes copy the ht capabilites from the band without overriding the SMPS state. Unfortunately the default value 0 for the SMPS field means static SMPS instead of disabled. This results in HT ibss and mesh setups using only single-stream rates, even though SMPS is not supposed to be active. Initialize SMPS to disabled for all bands on ieee80211_hw_register to ensure that the value is sane where it is not overriden with the real SMPS state. 
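For context on the SMPS default described in that changelog, a small standalone sketch of the two-bit SM power save encoding (constants mirror the 802.11 field values; the names here are illustrative, not mac80211's):

/* SM power save subfield of the HT capability info, bits 2..3:
 * 0 = static, 1 = dynamic, 3 = disabled.  A zero-initialised field
 * therefore advertises static SMPS rather than "SMPS off". */
enum ex_sm_ps { EX_SM_PS_STATIC = 0, EX_SM_PS_DYNAMIC = 1, EX_SM_PS_DISABLED = 3 };
#define EX_SM_PS_SHIFT 2

static unsigned short ex_disable_smps(unsigned short ht_cap)
{
        /* no masking needed: the "disabled" value has both bits set */
        return ht_cap | (EX_SM_PS_DISABLED << EX_SM_PS_SHIFT);
}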
Reported-by: Elektra Wagenrad Signed-off-by: Felix Fietkau [move VHT TODO comment to a better place] Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1075ac2..2bb6899 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -908,12 +908,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; - if (sband->ht_cap.ht_supported) - local->rx_chains = - max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), - local->rx_chains); + if (!sband->ht_cap.ht_supported) + continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ + local->rx_chains = + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), + local->rx_chains); + + /* no need to mask, SM_PS_DISABLED has all bits set */ + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN */ -- cgit v0.10.2 From 283994074501393b67590220ec8015f60ee670a8 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 13 Jan 2017 01:11:18 -0500 Subject: x86/tsc: Add the Intel Denverton Processor to native_calibrate_tsc() [ Upstream commit 695085b4bc7603551db0b3da897b8bf9893ca218 ] The Intel Denverton microserver uses a 25 MHz TSC crystal, so we can derive its exact [*] TSC frequency using CPUID and some arithmetic, eg.: TSC: 1800 MHz (25000000 Hz * 216 / 3 / 1000000) [*] 'exact' is only as good as the crystal, which should be +/- 20ppm Signed-off-by: Len Brown Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/306899f94804aece6d8fa8b4223ede3b48dbb59c.1484287748.git.len.brown@intel.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index eea88fe..6e57edf 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void) crystal_khz = 24000; /* 24.0 MHz */ break; case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ATOM_DENVERTON: crystal_khz = 25000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: -- cgit v0.10.2 From 48131dd0f2b19dd297147c23dc634432fecee638 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 12 Jan 2017 16:53:11 +0100 Subject: x86/mpx: Use compatible types in comparison to fix sparse error [ Upstream commit 453828625731d0ba7218242ef6ec88f59408f368 ] info->si_addr is of type void __user *, so it should be compared against something from the same address space. This fixes the following sparse error: arch/x86/mm/mpx.c:296:27: error: incompatible types in comparison expression (different address spaces) Signed-off-by: Tobias Klauser Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index e4f8009..3e7c489 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * We were not able to extract an address from the instruction, * probably because there was something invalid in it. 
*/ - if (info->si_addr == (void *)-1) { + if (info->si_addr == (void __user *)-1) { err = -EINVAL; goto err_out; } -- cgit v0.10.2 From 1c68633329d230dc350bc8c521689be4703f6016 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Dec 2016 14:59:00 +0100 Subject: perf/core: Fix sys_perf_event_open() vs. hotplug [ Upstream commit 63cae12bce9861cec309798d34701cf3da20bc71 ] There is problem with installing an event in a task that is 'stuck' on an offline CPU. Blocked tasks are not dis-assosciated from offlined CPUs, after all, a blocked task doesn't run and doesn't require a CPU etc.. Only on wakeup do we ammend the situation and place the task on a available CPU. If we hit such a task with perf_install_in_context() we'll loop until either that task wakes up or the CPU comes back online, if the task waking depends on the event being installed, we're stuck. While looking into this issue, I also spotted another problem, if we hit a task with perf_install_in_context() that is in the middle of being migrated, that is we observe the old CPU before sending the IPI, but run the IPI (on the old CPU) while the task is already running on the new CPU, things also go sideways. Rework things to rely on task_curr() -- outside of rq->lock -- which is rather tricky. Imagine the following scenario where we're trying to install the first event into our task 't': CPU0 CPU1 CPU2 (current == t) t->perf_event_ctxp[] = ctx; smp_mb(); cpu = task_cpu(t); switch(t, n); migrate(t, 2); switch(p, t); ctx = t->perf_event_ctxp[]; // must not be NULL smp_function_call(cpu, ..); generic_exec_single() func(); spin_lock(ctx->lock); if (task_curr(t)) // false add_event_to_ctx(); spin_unlock(ctx->lock); perf_event_context_sched_in(); spin_lock(ctx->lock); // sees event So its CPU0's store of t->perf_event_ctxp[] that must not go 'missing'. Because if CPU2's load of that variable were to observe NULL, it would not try to schedule the ctx and we'd have a task running without its counter, which would be 'bad'. As long as we observe !NULL, we'll acquire ctx->lock. If we acquire it first and not see the event yet, then CPU0 must observe task_curr() and retry. If the install happens first, then we must see the event on sched-in and all is well. I think we can translate the first part (until the 'must not be NULL') of the scenario to a litmus test like: C C-peterz { } P0(int *x, int *y) { int r1; WRITE_ONCE(*x, 1); smp_mb(); r1 = READ_ONCE(*y); } P1(int *y, int *z) { WRITE_ONCE(*y, 1); smp_store_release(z, 1); } P2(int *x, int *z) { int r1; int r2; r1 = smp_load_acquire(z); smp_mb(); r2 = READ_ONCE(*x); } exists (0:r1=0 /\ 2:r1=1 /\ 2:r2=0) Where: x is perf_event_ctxp[], y is our tasks's CPU, and z is our task being placed on the rq of CPU2. The P0 smp_mb() is the one added by this patch, ordering the store to perf_event_ctxp[] from find_get_context() and the load of task_cpu() in task_function_call(). The smp_store_release/smp_load_acquire model the RCpc locking of the rq->lock and the smp_mb() of P2 is the context switch switching from whatever CPU2 was running to our task 't'. This litmus test evaluates into: Test C-peterz Allowed States 7 0:r1=0; 2:r1=0; 2:r2=0; 0:r1=0; 2:r1=0; 2:r2=1; 0:r1=0; 2:r1=1; 2:r2=1; 0:r1=1; 2:r1=0; 2:r2=0; 0:r1=1; 2:r1=0; 2:r2=1; 0:r1=1; 2:r1=1; 2:r2=0; 0:r1=1; 2:r1=1; 2:r2=1; No Witnesses Positive: 0 Negative: 7 Condition exists (0:r1=0 /\ 2:r1=1 /\ 2:r2=0) Observation C-peterz Never 0 7 Hash=e427f41d9146b2a5445101d3e2fcaa34 And the strong and weak model agree. 
Reported-by: Mark Rutland Tested-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Will Deacon Cc: jeremy.linton@arm.com Link: http://lkml.kernel.org/r/20161209135900.GU3174@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/events/core.c b/kernel/events/core.c index 11cc1d8..30ccc70 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2272,7 +2272,7 @@ static int __perf_install_in_context(void *info) struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_event_context *task_ctx = cpuctx->task_ctx; - bool activate = true; + bool reprogram = true; int ret = 0; raw_spin_lock(&cpuctx->ctx.lock); @@ -2280,27 +2280,26 @@ static int __perf_install_in_context(void *info) raw_spin_lock(&ctx->lock); task_ctx = ctx; - /* If we're on the wrong CPU, try again */ - if (task_cpu(ctx->task) != smp_processor_id()) { - ret = -ESRCH; - goto unlock; - } + reprogram = (ctx->task == current); /* - * If we're on the right CPU, see if the task we target is - * current, if not we don't have to activate the ctx, a future - * context switch will do that for us. + * If the task is running, it must be running on this CPU, + * otherwise we cannot reprogram things. + * + * If its not running, we don't care, ctx->lock will + * serialize against it becoming runnable. */ - if (ctx->task != current) - activate = false; - else - WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx); + if (task_curr(ctx->task) && !reprogram) { + ret = -ESRCH; + goto unlock; + } + WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); } else if (task_ctx) { raw_spin_lock(&task_ctx->lock); } - if (activate) { + if (reprogram) { ctx_sched_out(ctx, cpuctx, EVENT_TIME); add_event_to_ctx(event, ctx); ctx_resched(cpuctx, task_ctx); @@ -2351,13 +2350,36 @@ perf_install_in_context(struct perf_event_context *ctx, /* * Installing events is tricky because we cannot rely on ctx->is_active * to be set in case this is the nr_events 0 -> 1 transition. + * + * Instead we use task_curr(), which tells us if the task is running. + * However, since we use task_curr() outside of rq::lock, we can race + * against the actual state. This means the result can be wrong. + * + * If we get a false positive, we retry, this is harmless. + * + * If we get a false negative, things are complicated. If we are after + * perf_event_context_sched_in() ctx::lock will serialize us, and the + * value must be correct. If we're before, it doesn't matter since + * perf_event_context_sched_in() will program the counter. + * + * However, this hinges on the remote context switch having observed + * our task->perf_event_ctxp[] store, such that it will in fact take + * ctx::lock in perf_event_context_sched_in(). + * + * We do this by task_function_call(), if the IPI fails to hit the task + * we know any future context switch of task must see the + * perf_event_ctpx[] store. */ -again: + /* - * Cannot use task_function_call() because we need to run on the task's - * CPU regardless of whether its current or not. 
+ * This smp_mb() orders the task->perf_event_ctxp[] store with the + * task_cpu() load, such that if the IPI then does not find the task + * running, a future context switch of that task must observe the + * store. */ - if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event)) + smp_mb(); +again: + if (!task_function_call(task, __perf_install_in_context, event)) return; raw_spin_lock_irq(&ctx->lock); @@ -2371,12 +2393,16 @@ again: raw_spin_unlock_irq(&ctx->lock); return; } - raw_spin_unlock_irq(&ctx->lock); /* - * Since !ctx->is_active doesn't mean anything, we must IPI - * unconditionally. + * If the task is not running, ctx->lock will avoid it becoming so, + * thus we can safely install the event. */ - goto again; + if (task_curr(task)) { + raw_spin_unlock_irq(&ctx->lock); + goto again; + } + add_event_to_ctx(event, ctx); + raw_spin_unlock_irq(&ctx->lock); } /* -- cgit v0.10.2 From 82835fb33ce54820206c14580eb1a149c473c50c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 3 Jan 2017 15:24:54 +0100 Subject: perf/x86: Reject non sampling events with precise_ip [ Upstream commit 18e7a45af91acdde99d3aa1372cc40e1f8142f7b ] As Peter suggested [1] rejecting non sampling PEBS events, because they dont make any sense and could cause bugs in the NMI handler [2]. [1] http://lkml.kernel.org/r/20170103094059.GC3093@worktop [2] http://lkml.kernel.org/r/1482931866-6018-3-git-send-email-jolsa@kernel.org Signed-off-by: Jiri Olsa Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Vince Weaver Link: http://lkml.kernel.org/r/20170103142454.GA26251@krava Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 38623e2..9604b25 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip > precise) return -EOPNOTSUPP; + + /* There's no sense in having PEBS for non sampling events: */ + if (!is_sampling_event(event)) + return -EINVAL; } /* * check that PEBS LBR correction does not conflict with -- cgit v0.10.2 From d21816c24591060a0af9fd258f85a1e5c04fba0f Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Tue, 13 Dec 2016 12:09:56 -0800 Subject: aio: fix lock dep warning [ Upstream commit a12f1ae61c489076a9aeb90bddca7722bf330df3 ] lockdep reports a warnning. file_start_write/file_end_write only acquire/release the lock for regular files. So checking the files in aio side too. [ 453.532141] ------------[ cut here ]------------ [ 453.533011] WARNING: CPU: 1 PID: 1298 at ../kernel/locking/lockdep.c:3514 lock_release+0x434/0x670 [ 453.533011] DEBUG_LOCKS_WARN_ON(depth <= 0) [ 453.533011] Modules linked in: [ 453.533011] CPU: 1 PID: 1298 Comm: fio Not tainted 4.9.0+ #964 [ 453.533011] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.9.0-1.fc24 04/01/2014 [ 453.533011] ffff8803a24b7a70 ffffffff8196cffb ffff8803a24b7ae8 0000000000000000 [ 453.533011] ffff8803a24b7ab8 ffffffff81091ee1 ffff8803a5dba700 00000dba00000008 [ 453.533011] ffffed0074496f59 ffff8803a5dbaf54 ffff8803ae0f8488 fffffffffffffdef [ 453.533011] Call Trace: [ 453.533011] [] dump_stack+0x67/0x9c [ 453.533011] [] __warn+0x111/0x130 [ 453.533011] [] warn_slowpath_fmt+0x97/0xb0 [ 453.533011] [] ? __warn+0x130/0x130 [ 453.533011] [] ? 
blk_finish_plug+0x29/0x60 [ 453.533011] [] lock_release+0x434/0x670 [ 453.533011] [] ? import_single_range+0xd4/0x110 [ 453.533011] [] ? rw_verify_area+0x65/0x140 [ 453.533011] [] ? aio_write+0x1f6/0x280 [ 453.533011] [] aio_write+0x229/0x280 [ 453.533011] [] ? aio_complete+0x640/0x640 [ 453.533011] [] ? debug_check_no_locks_freed+0x1a0/0x1a0 [ 453.533011] [] ? debug_lockdep_rcu_enabled.part.2+0x1a/0x30 [ 453.533011] [] ? debug_lockdep_rcu_enabled+0x35/0x40 [ 453.533011] [] ? __might_fault+0x7e/0xf0 [ 453.533011] [] do_io_submit+0x94c/0xb10 [ 453.533011] [] ? do_io_submit+0x23e/0xb10 [ 453.533011] [] ? SyS_io_destroy+0x270/0x270 [ 453.533011] [] ? mark_held_locks+0x23/0xc0 [ 453.533011] [] ? trace_hardirqs_on_thunk+0x1a/0x1c [ 453.533011] [] SyS_io_submit+0x10/0x20 [ 453.533011] [] entry_SYSCALL_64_fastpath+0x18/0xad [ 453.533011] [] ? trace_hardirqs_off_caller+0xc0/0x110 [ 453.533011] ---[ end trace b2fbe664d1cc0082 ]--- Cc: Dmitry Monakhov Cc: Jan Kara Cc: Christoph Hellwig Cc: Al Viro Reviewed-by: Christoph Hellwig Signed-off-by: Shaohua Li Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/fs/aio.c b/fs/aio.c index 428484f..0fcb49a 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2) * Tell lockdep we inherited freeze protection from submission * thread. */ - __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); + if (S_ISREG(file_inode(file)->i_mode)) + __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); file_end_write(file); } @@ -1492,7 +1493,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, * by telling it the lock got released so that it doesn't * complain about held lock when we return to userspace. */ - __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); + if (S_ISREG(file_inode(file)->i_mode)) + __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); } kfree(iovec); return ret; -- cgit v0.10.2 From 68a5dc38573586ad47befe5b91c62d7c2cb8141d Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Wed, 11 Jan 2017 13:25:00 -0600 Subject: coredump: Ensure proper size of sparse core files [ Upstream commit 4d22c75d4c7b5c5f4bd31054f09103ee490878fd ] If the last section of a core file ends with an unmapped or zero page, the size of the file does not correspond with the last dump_skip() call. gdb complains that the file is truncated and can be confusing to users. After all of the vma sections are written, make sure that the file size is no smaller than the current file position. This problem can be demonstrated with gdb's bigcore testcase on the sparc architecture. Signed-off-by: Dave Kleikamp Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 2472af2..cfd724f 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2296,6 +2296,7 @@ static int elf_core_dump(struct coredump_params *cprm) goto end_coredump; } } + dump_truncate(cprm); if (!elf_core_write_extra_data(cprm)) goto end_coredump; diff --git a/fs/coredump.c b/fs/coredump.c index eb9c92c..4407e27 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align) return mod ? dump_skip(cprm, align - mod) : 1; } EXPORT_SYMBOL(dump_align); + +/* + * Ensures that file size is big enough to contain the current file + * postion. 
This prevents gdb from complaining about a truncated file + * if the last "write" to the file was dump_skip. + */ +void dump_truncate(struct coredump_params *cprm) +{ + struct file *file = cprm->file; + loff_t offset; + + if (file->f_op->llseek && file->f_op->llseek != no_llseek) { + offset = file->f_op->llseek(file, 0, SEEK_CUR); + if (i_size_read(file->f_mapping->host) < offset) + do_truncate(file->f_path.dentry, offset, 0, file); + } +} +EXPORT_SYMBOL(dump_truncate); diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d016a12..28ffa94 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -14,6 +14,7 @@ struct coredump_params; extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); +extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP extern void do_coredump(const siginfo_t *siginfo); #else -- cgit v0.10.2 From 9d00195bc0afa0252b9cdb157eb4ed1e13631bc6 Mon Sep 17 00:00:00 2001 From: Nikita Yushchenko Date: Wed, 11 Jan 2017 21:56:31 +0300 Subject: swiotlb: ensure that page-sized mappings are page-aligned [ Upstream commit 602d9858f07c72eab64f5f00e2fae55f9902cfbe ] Some drivers do depend on page mappings to be page aligned. Swiotlb already enforces such alignment for mappings greater than page, extend that to page-sized mappings as well. Without this fix, nvme hits BUG() in nvme_setup_prps(), because that routine assumes page-aligned mappings. Signed-off-by: Nikita Yushchenko Reviewed-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ad1d296..b7812df 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -456,11 +456,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); /* - * For mappings greater than a page, we limit the stride (and - * hence alignment) to a page size. + * For mappings greater than or equal to a page, we limit the stride + * (and hence alignment) to a page size. */ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size > PAGE_SIZE) + if (size >= PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; -- cgit v0.10.2 From 0e9867b7113c56b367f2e753cd411cf7cef0d2ec Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 28 Dec 2016 11:33:48 +0100 Subject: s390/ctl_reg: make __ctl_load a full memory barrier [ Upstream commit e991c24d68b8c0ba297eeb7af80b1e398e98c33f ] We have quite a lot of code that depends on the order of the __ctl_load inline assemby and subsequent memory accesses, like e.g. disabling lowcore protection and the writing to lowcore. Since the __ctl_load macro does not have memory barrier semantics, nor any other dependencies the compiler is, theoretically, free to shuffle code around. Or in other words: storing to lowcore could happen before lowcore protection is disabled. In order to avoid this class of potential bugs simply add a full memory barrier to the __ctl_load macro. 
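As background on the barrier semantics that changelog relies on, a generic (not s390-specific) sketch of how a "memory" clobber on an asm statement keeps the compiler from moving surrounding loads and stores across it:

/* An empty asm with a "memory" clobber acts as a pure compiler barrier:
 * the compiler must assume memory may be read or written here, so it
 * cannot reorder or cache memory accesses across this point. */
static inline void ex_compiler_barrier(void)
{
        asm volatile("" : : : "memory");
}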
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab..8e136b8 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -15,7 +15,9 @@ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ } #define __ctl_store(array, low, high) { \ -- cgit v0.10.2 From 5f54c4e1e2afd0a437e24c0b9689728c1afc1591 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 12 Jan 2017 16:09:44 +0100 Subject: usb: dwc2: gadget: Fix GUSBCFG.USBTRDTIM value [ Upstream commit ca02954ada711b08e5b0d84590a631fd63ed39f9 ] USBTrdTim must be programmed to 0x5 when phy has a UTMI+ 16-bit wide interface or 0x9 when it has a 8-bit wide interface. GUSBCFG reset value (Value After Reset: 0x1400) sets USBTrdTim to 0x5. In case of 8-bit UTMI+, without clearing GUSBCFG.USBTRDTIM mask, USBTrdTim results in 0xD (0x5 | 0x9). That's why we need to clear GUSBCFG.USBTRDTIM mask before setting USBTrdTim value, to ensure USBTrdTim is correctly set in case of 8-bit UTMI+. Signed-off-by: Amelie Delaunay Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 24fbebc..cfdd5c3 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2532,7 +2532,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; @@ -3403,7 +3403,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; -- cgit v0.10.2 From fa1dbf505aefe87cb3adbe279c3eaac087d5790d Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:27 +0100 Subject: be2net: fix status check in be_cmd_pmac_add() [ Upstream commit fe68d8bfe59c561664aa87d827aa4b320eb08895 ] Return value from be_mcc_notify_wait() contains a base completion status together with an additional status. The base_status() macro need to be used to access base status. Fixes: e3a7ae2 be2net: Changing MAC Address of a VF was broken Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 0e74529..30e8550 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: mutex_unlock(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; -- cgit v0.10.2 From 02434def6fd0df57a5c4b1309b7d16f985234a7d Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:28 +0100 Subject: be2net: don't delete MAC on close on unprivileged BE3 VFs [ Upstream commit 6d928ae590c8d58cfd5cca997d54394de139cbb7 ] BE3 VFs without FILTMGMT privilege are not allowed to modify its MAC, VLAN table and UC/MC lists. So don't try to delete MAC on such VFs. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9711ca4..a25d35a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3630,7 +3630,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) static void be_disable_if_filters(struct be_adapter *adapter) { - be_dev_mac_del(adapter, adapter->pmac_id[0]); + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) + be_dev_mac_del(adapter, adapter->pmac_id[0]); + be_clear_uc_list(adapter); be_clear_mc_list(adapter); -- cgit v0.10.2 From cc439964fab1a58f5f7d9041845228bdd6ddfa6c Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:29 +0100 Subject: be2net: fix MAC addr setting on privileged BE3 VFs [ Upstream commit 34393529163af7163ef8459808e3cf2af7db7f16 ] During interface opening MAC address stored in netdev->dev_addr is programmed in the HW with exception of BE3 VFs where the initial MAC is programmed by parent PF. This is OK when MAC address is not changed when an interfaces is down. In this case the requested MAC is stored to netdev->dev_addr and later is stored into HW during opening. But this is not done for all BE3 VFs so the NIC HW does not know anything about this change and all traffic is filtered. This is the case of bonding if fail_over_mac == 0 where the MACs of the slaves are changed while they are down. The be2net behavior is too restrictive because if a BE3 VF has the FILTMGMT privilege then it is able to modify its MAC without any restriction. To solve the described problem the driver should take care about these privileged BE3 VFs so the MAC is programmed during opening. And by contrast unpriviled BE3 VFs should not be allowed to change its MAC in any case. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a25d35a..b3c9cbe 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -319,6 +319,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0; + /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC + * address + */ + if (BEx_chip(adapter) && be_virtfn(adapter) && + !check_privilege(adapter, BE_PRIV_FILTMGMT)) + return -EPERM; + /* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done; @@ -3787,8 +3794,9 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status; - /* For BE3 VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) { status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; -- cgit v0.10.2 From e1eac347d971b59f3b7de732d488ef00e087e2f8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 11 Jan 2017 14:59:38 +0900 Subject: perf probe: Fix to show correct locations for events on modules [ Upstream commit d2d4edbebe07ddb77980656abe7b9bc7a9e0cdf7 ] Fix to show correct locations for events on modules by relocating given address instead of retrying after failure. This happens when the module text size is big enough, bigger than sh_addr, because the original code retries with given address + sh_addr if it failed to find CU DIE at the given address. Any address smaller than sh_addr always fails and it retries with the correct address, but addresses bigger than sh_addr will get a CU DIE which is on the given address (not adjusted by sh_addr). In my environment(x86-64), the sh_addr of ".text" section is 0x10030. Since i915 is a huge kernel module, we can see this issue as below. $ grep "[Tt] .*\[i915\]" /proc/kallsyms | sort | head -n1 ffffffffc0270000 t i915_switcheroo_can_switch [i915] ffffffffc0270000 + 0x10030 = ffffffffc0280030, so we'll check symbols cross this boundary. $ grep "[Tt] .*\[i915\]" /proc/kallsyms | grep -B1 ^ffffffffc028\ | head -n 2 ffffffffc027ff80 t haswell_init_clock_gating [i915] ffffffffc0280110 t valleyview_init_clock_gating [i915] So setup probes on both function and see what happen. $ sudo ./perf probe -m i915 -a haswell_init_clock_gating \ -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 $ sudo ./perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on i915_vga_set_decode:4@gpu/drm/i915/i915_drv.c in i915) As you can see, haswell_init_clock_gating is correctly shown, but valleyview_init_clock_gating is not. With this patch, both events are shown correctly. 
$ sudo ./perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) Committer notes: In my case: # perf probe -m i915 -a haswell_init_clock_gating -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 # perf probe -l probe:haswell_init_clock_gating (on i915_getparam+432@gpu/drm/i915/i915_drv.c in i915) probe:valleyview_init_clock_gating (on __i915_printk+240@gpu/drm/i915/i915_drv.c in i915) # # readelf -SW /lib/modules/4.9.0+/build/vmlinux | egrep -w '.text|Name' [Nr] Name Type Address Off Size ES Flg Lk Inf Al [ 1] .text PROGBITS ffffffff81000000 200000 822fd3 00 AX 0 0 4096 # So both are b0rked, now with the fix: # perf probe -m i915 -a haswell_init_clock_gating -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 # perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) # Both looks correct. Signed-off-by: Masami Hiramatsu Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/148411436777.9978.1440275861947194930.stgit@devbox Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index df4debe..0278fe1 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1543,16 +1543,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; - bool reloc = false; -retry: + /* We always need to relocate the address for aranges */ + if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { - addr += baseaddr; - reloc = true; - goto retry; - } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; -- cgit v0.10.2 From 18b200e0c8ee07e7e3f2b1bd7a5552b58457452f Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 13 Jan 2017 09:32:34 -0500 Subject: net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types [ Upstream commit 34c55cf2fc75f8bf6ba87df321038c064cf2d426 ] Currently dp83867 driver returns error if phy interface type PHY_INTERFACE_MODE_RGMII_RXID is used to set the rx only internal delay. Similarly issue happens for PHY_INTERFACE_MODE_RGMII_TXID. Fix this by checking also the interface type if a particular delay value is missing in the phy dt bindings. Also update the DT document accordingly. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt index 5d21141..75bcaa3 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83867.txt +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -3,9 +3,11 @@ Required properties: - reg - The ID number for the phy, usually a small integer - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h for applicable values diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 91177a4..4cad955 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -113,12 +113,16 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)) return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; return of_property_read_u32(of_node, "ti,fifo-depth", -- cgit v0.10.2 From 9f8ffe4e09520e209f41d01c73a29598414123b1 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Fri, 13 Jan 2017 15:46:25 +0100 Subject: tipc: allocate user memory with GFP_KERNEL flag [ Upstream commit 57d5f64d83ab5b5a5118b1597386dd76eaf4340d ] Until now, we allocate memory always with GFP_ATOMIC flag. When the system is under memory pressure and a user tries to send, the send fails due to low memory. However, the user application can wait for free memory if we allocate it using GFP_KERNEL flag. In this commit, we use allocate memory with GFP_KERNEL for all user allocation. Reported-by: Rune Torgersen Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 6b109a8..02462d6 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, /* Send response, if necessary */ if (respond && (mtyp == DSC_REQ_MSG)) { - rskb = tipc_buf_acquire(MAX_H_SIZE); + rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!rskb) return; tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer); @@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b, req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) return -ENOMEM; - req->buf = tipc_buf_acquire(MAX_H_SIZE); + req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!req->buf) { kfree(req); return -ENOMEM; diff --git a/net/tipc/link.c b/net/tipc/link.c index bda89bf..4e8647a 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1395,7 +1395,7 @@ tnl: msg_set_seqno(hdr, seqno++); pktlen = msg_size(hdr); msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); - tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE); + tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); if (!tnlskb) { pr_warn("%sunable to send packet\n", link_co_err); return; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 1bd9817..56ea0ad 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -58,12 +58,12 @@ static unsigned int align(unsigned int i) * NOTE: Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. */ -struct sk_buff *tipc_buf_acquire(u32 size) +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) { struct sk_buff *skb; unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; - skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); + skb = alloc_skb_fclone(buf_size, gfp); if (skb) { skb_reserve(skb, BUF_HEADROOM); skb_put(skb, size); @@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, struct tipc_msg *msg; struct sk_buff *buf; - buf = tipc_buf_acquire(hdr_sz + data_sz); + buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC); if (unlikely(!buf)) return NULL; @@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, /* No fragmentation needed? 
*/ if (likely(msz <= pktmax)) { - skb = tipc_buf_acquire(msz); + skb = tipc_buf_acquire(msz, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; skb_orphan(skb); @@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ - skb = tipc_buf_acquire(pktmax); + skb = tipc_buf_acquire(pktmax, GFP_KERNEL); if (!skb) return -ENOMEM; skb_orphan(skb); @@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, pktsz = drem + INT_H_SIZE; else pktsz = pktmax; - skb = tipc_buf_acquire(pktsz); + skb = tipc_buf_acquire(pktsz, GFP_KERNEL); if (!skb) { rc = -ENOMEM; goto error; @@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, if (msz > (max / 2)) return false; - _skb = tipc_buf_acquire(max); + _skb = tipc_buf_acquire(max, GFP_ATOMIC); if (!_skb) return false; @@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) /* Never return SHORT header; expand by replacing buffer if necessary */ if (msg_short(hdr)) { - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen); + *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); if (!*skb) goto exit; memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 50a7398..6c0455c 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr) return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); } -struct sk_buff *tipc_buf_acquire(u32 size); +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp); bool tipc_msg_validate(struct sk_buff *skb); bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c1cfd92..23f8899 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, u32 dest) { struct tipc_net *tn = net_generic(net, tipc_net_id); - struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); + struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); struct tipc_msg *msg; if (buf != NULL) { -- cgit v0.10.2 From b6f75b986a7f7b79953b94f9778de295a253c624 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 11 Jan 2017 15:01:57 +0900 Subject: perf probe: Fix to probe on gcc generated functions in modules [ Upstream commit 613f050d68a8ed3c0b18b9568698908ef7bbc1f7 ] Fix to probe on gcc generated functions on modules. Since probing on a module is based on its symbol name, it should be adjusted on actual symbols. E.g. without this fix, perf probe shows probe definition on non-exist symbol as below. $ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -F in_range* in_range.isra.12 $ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -D in_range p:probe/in_range nf_nat:in_range+0 With this fix, perf probe correctly shows a probe on gcc-generated symbol. $ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -D in_range p:probe/in_range nf_nat:in_range.isra.12+0 This also fixes same problem on online module as below. 
$ perf probe -m i915 -D assert_plane p:probe/assert_plane i915:assert_plane.constprop.134+0 Signed-off-by: Masami Hiramatsu Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/148411450673.9978.14905987549651656075.stgit@devbox Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d281ae2..1d9c02b 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -645,18 +645,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, return ret; } -static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, - int ntevs, const char *module) +static int +post_process_module_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + struct debuginfo *dinfo) { + Dwarf_Addr text_offs = 0; int i, ret = 0; char *mod_name = NULL; + struct map *map; if (!module) return 0; - mod_name = find_module_name(module); + map = get_target_map(module, false); + if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) { + pr_warning("Failed to get ELF symbols for %s\n", module); + return -EINVAL; + } + mod_name = find_module_name(module); for (i = 0; i < ntevs; i++) { + ret = post_process_probe_trace_point(&tevs[i].point, + map, (unsigned long)text_offs); + if (ret < 0) + break; tevs[i].point.module = strdup(mod_name ? mod_name : module); if (!tevs[i].point.module) { @@ -666,6 +679,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, } free(mod_name); + map__put(map); + return ret; } @@ -722,7 +737,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse static int post_process_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event *tevs, int ntevs, const char *module, - bool uprobe) + bool uprobe, struct debuginfo *dinfo) { int ret; @@ -730,7 +745,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev, ret = add_exec_to_probe_trace_events(tevs, ntevs, module); else if (module) /* Currently ref_reloc_sym based probe is not for drivers */ - ret = add_module_to_probe_trace_events(tevs, ntevs, module); + ret = post_process_module_probe_trace_events(tevs, ntevs, + module, dinfo); else ret = post_process_kernel_probe_trace_events(tevs, ntevs); @@ -774,30 +790,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, } } - debuginfo__delete(dinfo); - if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("Found %d probe_trace_events.\n", ntevs); ret = post_process_probe_trace_events(pev, *tevs, ntevs, - pev->target, pev->uprobes); + pev->target, pev->uprobes, dinfo); if (ret < 0 || ret == ntevs) { + pr_debug("Post processing failed or all events are skipped. (%d)\n", ret); clear_probe_trace_events(*tevs, ntevs); zfree(tevs); + ntevs = 0; } - if (ret != ntevs) - return ret < 0 ? ret : ntevs; - ntevs = 0; - /* Fall through */ } + debuginfo__delete(dinfo); + if (ntevs == 0) { /* No error but failed to find probe point. 
*/ pr_warning("Probe point '%s' not found.\n", synthesize_perf_probe_point(&pev->point)); return -ENOENT; - } - /* Error path : ntevs < 0 */ - pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); - if (ntevs < 0) { + } else if (ntevs < 0) { + /* Error path : ntevs < 0 */ + pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); if (ntevs == -EBADF) pr_warning("Warning: No dwarf info found in the vmlinux - " "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 0278fe1..0d9d6e0 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg, } /* For the kernel module, we need a special code to get a DIE */ -static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset) { int n, i; Elf32_Word shndx; @@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) if (!shdr) return -ENOENT; *offs = shdr->sh_addr; + if (adjust_offset) + *offs -= shdr->sh_offset; } } return 0; @@ -1545,7 +1548,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, int baseline = 0, lineno = 0, ret = 0; /* We always need to relocate the address for aranges */ - if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0) addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index f1d8558..2956c51 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt); +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset); + /* Find a line range */ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); -- cgit v0.10.2 From 399566f8a4fb1ea442046942640e37d9ea9fa0d6 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 16 Jan 2017 18:31:39 +0200 Subject: net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV [ Upstream commit 9577b174cd0323d287c994ef0891db71666d0765 ] When running SRIOV, warnings for SRQ LIMIT events flood the Hypervisor's message log when (correct, normally operating) apps use SRQ LIMIT events as a trigger to post WQEs to SRQs. Add more information to the existing debug printout for SRQ_LIMIT, and output the warning messages only for the SRQ CATAS ERROR event. Fixes: acba2420f9d2 ("mlx4_core: Add wrapper functions and comm channel and slave event support to EQs") Fixes: e0debf9cb50d ("mlx4_core: Reduce warning message for SRQ_LIMIT event to debug level") Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cd3638e..0509996 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } -- cgit v0.10.2 From 168bd51ec5efbb92eb9bcdefb1327ef22e4898a9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 7 Feb 2017 20:56:08 +0800 Subject: sctp: check af before verify address in sctp_addr_id2transport [ Upstream commit 912964eacb111551db73429719eb5fadcab0ff8a ] Commit 6f29a1306131 ("sctp: sctp_addr_id2transport should verify the addr before looking up assoc") invoked sctp_verify_addr to verify the addr. But it didn't check af variable beforehand, once users pass an address with family = 0 through sockopt, sctp_get_af_specific will return NULL and NULL pointer dereference will be caused by af->sockaddr_len. This patch is to fix it by returning NULL if af variable is NULL. Fixes: 6f29a1306131 ("sctp: sctp_addr_id2transport should verify the addr before looking up assoc") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 487c127..9647e31 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, union sctp_addr *laddr = (union sctp_addr *)addr; struct sctp_transport *transport; - if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) + if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len)) return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, -- cgit v0.10.2 From adfe95fe5b4290693a57f1682fcf3c4f61951086 Mon Sep 17 00:00:00 2001 From: Peter Dawson Date: Fri, 26 May 2017 06:35:18 +1000 Subject: ip6_tunnel, ip6_gre: fix setting of DSCP on encapsulated packets [ Upstream commit 0e9a709560dbcfbace8bf4019dc5298619235891 ] This fix addresses two problems in the way the DSCP field is formulated on the encapsulating header of IPv6 tunnels. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=195661 1) The IPv6 tunneling code was manipulating the DSCP field of the encapsulating packet using the 32b flowlabel. 
Since the flowlabel is only the lower 20b it was incorrect to assume that the upper 12b containing the DSCP and ECN fields would remain intact when formulating the encapsulating header. This fix handles the 'inherit' and 'fixed-value' DSCP cases explicitly using the extant dsfield u8 variable. 2) The use of INET_ECN_encapsulate(0, dsfield) in ip6_tnl_xmit was incorrect and resulted in the DSCP value always being set to 0. Commit 90427ef5d2a4 ("ipv6: fix flow labels when the traffic class is non-0") caused the regression by masking out the flowlabel which exposed the incorrect handling of the DSCP portion of the flowlabel in ip6_tunnel and ip6_gre. Fixes: 90427ef5d2a4 ("ipv6: fix flow labels when the traffic class is non-0") Signed-off-by: Peter Dawson Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a5fdc1a..d2844ee 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -542,11 +542,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - dsfield = ipv4_get_dsfield(iph); - if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) - & IPV6_TCLASS_MASK; + dsfield = ipv4_get_dsfield(iph); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; @@ -599,9 +598,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); + dsfield = ipv6_get_dsfield(ipv6h); + else + dsfield = ip6_tclass(t->parms.flowinfo); + if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 63fad24..1fc9daa 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1196,7 +1196,7 @@ route_lookup: skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); - ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), + ip6_flow_hdr(ipv6h, dsfield, ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); ipv6h->hop_limit = hop_limit; ipv6h->nexthdr = proto; @@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (tproto != IPPROTO_IPIP && tproto != 0) return -1; - dsfield = ipv4_get_dsfield(iph); - if (t->parms.collect_md) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; @@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPIP; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; + dsfield = ip6_tclass(key->label); } else { if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; @@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPIP; if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) - & IPV6_TCLASS_MASK; + dsfield = ipv4_get_dsfield(iph); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; } @@ -1263,6 +1263,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (iptunnel_handle_offloads(skb, 
SKB_GSO_IPXIP6)) return -1; + dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph)); + skb_set_inner_ipproto(skb, IPPROTO_IPIP); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, @@ -1296,8 +1298,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) ip6_tnl_addr_conflict(t, ipv6h)) return -1; - dsfield = ipv6_get_dsfield(ipv6h); - if (t->parms.collect_md) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; @@ -1311,6 +1311,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPV6; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; + dsfield = ip6_tclass(key->label); } else { offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ @@ -1333,7 +1334,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPV6; if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK); + dsfield = ipv6_get_dsfield(ipv6h); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) @@ -1343,6 +1346,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1; + dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h)); + skb_set_inner_ipproto(skb, IPPROTO_IPV6); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, -- cgit v0.10.2 From f9f73c58feefa8a5dda019df9c549c6e355e15be Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Tue, 6 Jun 2017 00:08:10 +0200 Subject: ravb: Fix use-after-free on `ifconfig eth0 down` [ Upstream commit 79514ef670e9e575a1fe36922268c439d0f0ca8a ] Commit a47b70ea86bd ("ravb: unmap descriptors when freeing rings") has introduced the issue seen in [1] reproduced on H3ULCB board. Fix this by relocating the RX skb ringbuffer free operation, so that swiotlb page unmapping can be done first. Freeing of aligned TX buffers is not relevant to the issue seen in [1]. Still, reposition TX free calls as well, to have all kfree() operations performed consistently _after_ dma_unmap_*()/dma_free_*(). 
[1] Console screenshot with the problem reproduced: salvator-x login: root root@salvator-x:~# ifconfig eth0 up Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: \ attached PHY driver [Micrel KSZ9031 Gigabit PHY] \ (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=235) IPv6: ADDRCONF(NETDEV_UP): eth0: link is not ready root@salvator-x:~# root@salvator-x:~# ifconfig eth0 down ================================================================== BUG: KASAN: use-after-free in swiotlb_tbl_unmap_single+0xc4/0x35c Write of size 1538 at addr ffff8006d884f780 by task ifconfig/1649 CPU: 0 PID: 1649 Comm: ifconfig Not tainted 4.12.0-rc4-00004-g112eb07287d1 #32 Hardware name: Renesas H3ULCB board based on r8a7795 (DT) Call trace: [] dump_backtrace+0x0/0x3a4 [] show_stack+0x14/0x1c [] dump_stack+0xf8/0x150 [] print_address_description+0x7c/0x330 [] kasan_report+0x2e0/0x2f4 [] check_memory_region+0x20/0x14c [] memcpy+0x48/0x68 [] swiotlb_tbl_unmap_single+0xc4/0x35c [] unmap_single+0x90/0xa4 [] swiotlb_unmap_page+0xc/0x14 [] __swiotlb_unmap_page+0xcc/0xe4 [] ravb_ring_free+0x514/0x870 [] ravb_close+0x288/0x36c [] __dev_close_many+0x14c/0x174 [] __dev_close+0xc8/0x144 [] __dev_change_flags+0xd8/0x194 [] dev_change_flags+0x60/0xb0 [] devinet_ioctl+0x484/0x9d4 [] inet_ioctl+0x190/0x194 [] sock_do_ioctl+0x78/0xa8 [] sock_ioctl+0x110/0x3c4 [] vfs_ioctl+0x90/0xa0 [] do_vfs_ioctl+0x148/0xc38 [] SyS_ioctl+0x44/0x74 [] el0_svc_naked+0x24/0x28 The buggy address belongs to the page: page:ffff7e001b6213c0 count:0 mapcount:0 mapping: (null) index:0x0 flags: 0x4000000000000000() raw: 4000000000000000 0000000000000000 0000000000000000 00000000ffffffff raw: 0000000000000000 ffff7e001b6213e0 0000000000000000 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff8006d884f680: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff8006d884f700: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff >ffff8006d884f780: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ^ ffff8006d884f800: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff8006d884f880: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ================================================================== Disabling lock debugging due to kernel taint root@salvator-x:~# Fixes: a47b70ea86bd ("ravb: unmap descriptors when freeing rings") Signed-off-by: Eugeniu Rosca Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 510ff62..11623aa 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -229,18 +229,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) int ring_size; int i; - /* Free RX skb ringbuffer */ - if (priv->rx_skb[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) - dev_kfree_skb(priv->rx_skb[q][i]); - } - kfree(priv->rx_skb[q]); - priv->rx_skb[q] = NULL; - - /* Free aligned TX buffers */ - kfree(priv->tx_align[q]); - priv->tx_align[q] = NULL; - if (priv->rx_ring[q]) { for (i = 0; i < priv->num_rx_ring[q]; i++) { struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; @@ -269,6 +257,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_ring[q] = NULL; } + /* Free RX skb ringbuffer */ + if (priv->rx_skb[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) + dev_kfree_skb(priv->rx_skb[q][i]); + } + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + /* Free TX skb ringbuffer. * SKBs are freed by ravb_tx_free() call above. */ -- cgit v0.10.2 From 647f605276c0b5e3019fcf8ad302d217d87adedc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 23 Jun 2017 15:08:41 -0700 Subject: mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings commit 029c54b09599573015a5c18dbe59cbdf42742237 upstream. Existing code that uses vmalloc_to_page() may assume that any address for which is_vmalloc_addr() returns true may be passed into vmalloc_to_page() to retrieve the associated struct page. This is not un unreasonable assumption to make, but on architectures that have CONFIG_HAVE_ARCH_HUGE_VMAP=y, it no longer holds, and we need to ensure that vmalloc_to_page() does not go off into the weeds trying to dereference huge PUDs or PMDs as table entries. Given that vmalloc() and vmap() themselves never create huge mappings or deal with compound pages at all, there is no correct answer in this case, so return NULL instead, and issue a warning. When reading /proc/kcore on arm64, you will hit an oops as soon as you hit the huge mappings used for the various segments that make up the mapping of vmlinux. With this patch applied, you will no longer hit the oops, but the kcore contents willl be incorrect (these regions will be zeroed out) We are fixing this for kcore specifically, so it avoids vread() for those regions. At least one other problematic user exists, i.e., /dev/kmem, but that is currently broken on arm64 for other reasons. Link: http://lkml.kernel.org/r/20170609082226.26152-1-ard.biesheuvel@linaro.org Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Reviewed-by: Laura Abbott Cc: Michal Hocko Cc: zhong jiang Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds [ardb: non-trivial backport to v4.9] Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f2481cb..195de42 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -244,11 +244,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); + /* + * Don't dereference bad PUD or PMD (below) entries. This will also + * identify huge mappings, which we may encounter on architectures + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. 
Such regions will be + * identified as vmalloc addresses by is_vmalloc_addr(), but are + * not [unambiguously] associated with a struct page, so there is + * no correct value to return for them. + */ if (!pgd_none(*pgd)) { pud_t *pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { + WARN_ON_ONCE(pud_bad(*pud)); + if (!pud_none(*pud) && !pud_bad(*pud)) { pmd_t *pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { + WARN_ON_ONCE(pmd_bad(*pmd)); + if (!pmd_none(*pmd) && !pmd_bad(*pmd)) { pte_t *ptep, pte; ptep = pte_offset_map(pmd, addr); -- cgit v0.10.2 From 1e1666257cb69022e7a6fe61b1cf041a852ce1bc Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 3 May 2017 16:43:19 +0200 Subject: xfrm: fix stack access out of bounds with CONFIG_XFRM_SUB_POLICY commit 9b3eb54106cf6acd03f07cf0ab01c13676a226c2 upstream. When CONFIG_XFRM_SUB_POLICY=y, xfrm_dst stores a copy of the flowi for that dst. Unfortunately, the code that allocates and fills this copy doesn't care about what type of flowi (flowi, flowi4, flowi6) gets passed. In multiple code paths (from raw_sendmsg, from TCP when replying to a FIN, in vxlan, geneve, and gre), the flowi that gets passed to xfrm is actually an on-stack flowi4, so we end up reading stuff from the stack past the end of the flowi4 struct. Since xfrm_dst->origin isn't used anywhere following commit ca116922afa8 ("xfrm: Eliminate "fl" and "pol" args to xfrm_bundle_ok()."), just get rid of it. xfrm_dst->partner isn't used either, so get rid of that too. Fixes: 9d6ec938019c ("ipv4: Use flowi4 in public route lookup interfaces.") Signed-off-by: Sabrina Dubroca Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 31947b9..835c30e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -944,10 +944,6 @@ struct xfrm_dst { struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; -#ifdef CONFIG_XFRM_SUB_POLICY - struct flowi *origin; - struct xfrm_selector *partner; -#endif u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; @@ -963,12 +959,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); -#ifdef CONFIG_XFRM_SUB_POLICY - kfree(xdst->origin); - xdst->origin = NULL; - kfree(xdst->partner); - xdst->partner = NULL; -#endif } #endif diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e0437a7..8da67f7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1808,43 +1808,6 @@ free_dst: goto out; } -#ifdef CONFIG_XFRM_SUB_POLICY -static int xfrm_dst_alloc_copy(void **target, const void *src, int size) -{ - if (!*target) { - *target = kmalloc(size, GFP_ATOMIC); - if (!*target) - return -ENOMEM; - } - - memcpy(*target, src, size); - return 0; -} -#endif - -static int xfrm_dst_update_parent(struct dst_entry *dst, - const struct xfrm_selector *sel) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->partner), - sel, sizeof(*sel)); -#else - return 0; -#endif -} - -static int xfrm_dst_update_origin(struct dst_entry *dst, - const struct flowi *fl) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); -#else - return 0; -#endif -} - static int xfrm_expand_policies(const struct flowi *fl, u16 family, struct xfrm_policy **pols, int *num_pols, int *num_xfrms) 
@@ -1916,16 +1879,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, xdst = (struct xfrm_dst *)dst; xdst->num_xfrms = err; - if (num_pols > 1) - err = xfrm_dst_update_parent(dst, &pols[1]->selector); - else - err = xfrm_dst_update_origin(dst, fl); - if (unlikely(err)) { - dst_free(dst); - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); - return ERR_PTR(err); - } - xdst->num_pols = num_pols; memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); -- cgit v0.10.2 From c460f2beb6f081fa22eb7291db49c13c266ffd86 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 13:35:37 +0300 Subject: xfrm: NULL dereference on allocation failure commit e747f64336fc15e1c823344942923195b800aa1e upstream. The default error code in pfkey_msg2xfrm_state() is -ENOBUFS. We added a new call to security_xfrm_state_alloc() which sets "err" to zero so there several places where we can return ERR_PTR(0) if kmalloc() fails. The caller is expecting error pointers so it leads to a NULL dereference. Fixes: df71837d5024 ("[LSM-IPSec]: Security association restriction.") Signed-off-by: Dan Carpenter Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman diff --git a/net/key/af_key.c b/net/key/af_key.c index f9c9ecb..4f59929 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, goto out; } + err = -ENOBUFS; key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; if (sa->sadb_sa_auth) { int keysize = 0; -- cgit v0.10.2 From ac2730234cc1454b901656ed7f59ca1b519cdaf1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 13:34:05 +0300 Subject: xfrm: Oops on error in pfkey_msg2xfrm_state() commit 1e3d0c2c70cd3edb5deed186c5f5c75f2b84a633 upstream. There are some missing error codes here so we accidentally return NULL instead of an error pointer. It results in a NULL pointer dereference. 
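[Editor's note] Both af_key fixes above hinge on the kernel's ERR_PTR() convention: a returned pointer encodes a negative errno only if the error variable was actually set, and ERR_PTR(0) is simply NULL, which IS_ERR() does not flag. The following is a minimal, self-contained userspace sketch of that failure mode (hypothetical names, simplified macros, not kernel code), assuming only standard C and errno.h:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr) ((intptr_t)(ptr))
    #define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

    struct state { int keylen; };

    /* Mimics the buggy shape: err is left at 0 on an allocation-failure path. */
    static struct state *make_state(int fail_alloc)
    {
            int err = 0;                    /* should become -ENOMEM below */
            struct state *x = calloc(1, sizeof(*x));

            if (!x)
                    return ERR_PTR(-ENOBUFS);
            if (fail_alloc)
                    goto out;               /* forgot: err = -ENOMEM; */
            x->keylen = 16;
            return x;
    out:
            free(x);
            return ERR_PTR(err);            /* ERR_PTR(0) == NULL */
    }

    int main(void)
    {
            struct state *x = make_state(1);

            if (IS_ERR(x)) {                /* false for NULL ... */
                    fprintf(stderr, "error %ld\n", (long)PTR_ERR(x));
                    return 1;
            }
            printf("keylen=%d\n", x->keylen);  /* ... so this dereferences NULL */
            return 0;
    }

Built with gcc, the sketch crashes on the final printf because make_state() returned ERR_PTR(0). The two patches below restore the convention by setting err (-ENOBUFS as the default, -ENOMEM on each failed allocation) before jumping to the error exit, so callers that expect error pointers never see a bare NULL.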
Fixes: df71837d5024 ("[LSM-IPSec]: Security association restriction.") Signed-off-by: Dan Carpenter Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman diff --git a/net/key/af_key.c b/net/key/af_key.c index 4f59929..e67c28e 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1147,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); - if (!x->aalg) + if (!x->aalg) { + err = -ENOMEM; goto out; + } strcpy(x->aalg->alg_name, a->name); x->aalg->alg_key_len = 0; if (key) { @@ -1167,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, goto out; } x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); - if (!x->calg) + if (!x->calg) { + err = -ENOMEM; goto out; + } strcpy(x->calg->alg_name, a->name); x->props.calgo = sa->sadb_sa_encrypt; } else { @@ -1182,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); - if (!x->ealg) + if (!x->ealg) { + err = -ENOMEM; goto out; + } strcpy(x->ealg->alg_name, a->name); x->ealg->alg_key_len = 0; if (key) { @@ -1228,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, struct xfrm_encap_tmpl *natt; x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); - if (!x->encap) + if (!x->encap) { + err = -ENOMEM; goto out; + } natt = x->encap; n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; -- cgit v0.10.2 From 4211442b2088554f1c99a72b0476f967c0509a0e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 17 Feb 2017 08:39:28 +0100 Subject: netfilter: use skb_to_full_sk in ip_route_me_harder commit 29e09229d9f26129a39462fae0ddabc4d9533989 upstream. inet_sk(skb->sk) is illegal in case skb is attached to request socket. Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener") Reported by: Daniel J Blueman Signed-off-by: Florian Westphal Tested-by: Daniel J Blueman Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b3cc133..c0cc6aa 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; + const struct sock *sk = skb_to_full_sk(skb); + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; @@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; + fl4.flowi4_oif = sk ? 
sk->sk_bound_dev_if : 0; if (!fl4.flowi4_oif) fl4.flowi4_oif = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; @@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); -- cgit v0.10.2 From eea0261db8efda7a5b3732c0d9a76e9b06bf040d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 27 Apr 2017 18:02:32 -0700 Subject: watchdog: bcm281xx: Fix use of uninitialized spinlock. commit fedf266f9955d9a019643cde199a2fd9a0259f6f upstream. The bcm_kona_wdt_set_resolution_reg() call takes the spinlock, so initialize it earlier. Fixes a warning at boot with lock debugging enabled. Fixes: 6adb730dc208 ("watchdog: bcm281xx: Watchdog Driver") Signed-off-by: Eric Anholt Reviewed-by: Florian Fainelli Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index e0c9842..11a72bc 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c @@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) if (!wdt) return -ENOMEM; + spin_lock_init(&wdt->lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); wdt->base = devm_ioremap_resource(dev, res); if (IS_ERR(wdt->base)) @@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) return ret; } - spin_lock_init(&wdt->lock); platform_set_drvdata(pdev, wdt); watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); bcm_kona_wdt_wdd.parent = &pdev->dev; -- cgit v0.10.2 From 478273e11521915b7a0fd977b4d43587997ec7b2 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 17 Feb 2017 12:07:30 +0000 Subject: sched/loadavg: Avoid loadavg spikes caused by delayed NO_HZ accounting commit 6e5f32f7a43f45ee55c401c0b9585eb01f9629a8 upstream. If we crossed a sample window while in NO_HZ we will add LOAD_FREQ to the pending sample window time on exit, setting the next update not one window into the future, but two. This situation on exiting NO_HZ is described by: this_rq->calc_load_update < jiffies < calc_load_update In this scenario, what we should be doing is: this_rq->calc_load_update = calc_load_update [ next window ] But what we actually do is: this_rq->calc_load_update = calc_load_update + LOAD_FREQ [ next+1 window ] This has the effect of delaying load average updates for potentially up to ~9seconds. This can result in huge spikes in the load average values due to per-cpu uninterruptible task counts being out of sync when accumulated across all CPUs. It's safe to update the per-cpu active count if we wake between sample windows because any load that we left in 'calc_load_idle' will have been zero'd when the idle load was folded in calc_global_load(). This issue is easy to reproduce before, commit 9d89c257dfb9 ("sched/fair: Rewrite runnable load and utilization average tracking") just by forking short-lived process pipelines built from ps(1) and grep(1) in a loop. I'm unable to reproduce the spikes after that commit, but the bug still seems to be present from code review. 
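[Editor's note] The window arithmetic described above is easier to see in isolation. Below is a self-contained userspace sketch (made-up jiffies values, simplified names, not the scheduler code) of the old and fixed calc_load_exit_idle() logic for the "per-rq update < jiffies < global update" case from the changelog:

    #include <stdio.h>

    #define LOAD_FREQ 5000UL                 /* pretend HZ=1000: 5 s windows */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    static unsigned long calc_load_update;   /* global: start of next window */
    static unsigned long rq_update;          /* per-rq copy                  */

    /* Old behaviour: sync, then unconditionally push one extra window out. */
    static void exit_idle_old(unsigned long jiffies)
    {
            if (time_before(jiffies, rq_update))
                    return;
            rq_update = calc_load_update;
            if (time_before(jiffies, rq_update + 10))
                    rq_update += LOAD_FREQ;  /* lands in window next+1 */
    }

    /* Fixed behaviour: sync first, skip ahead only if we woke in-window. */
    static void exit_idle_new(unsigned long jiffies)
    {
            rq_update = calc_load_update;
            if (time_before(jiffies, rq_update))
                    return;                  /* lands in the next window */
            if (time_before(jiffies, rq_update + 10))
                    rq_update += LOAD_FREQ;
    }

    int main(void)
    {
            unsigned long jiffies = 10400;   /* rq_update < jiffies < global */

            calc_load_update = 10500;

            rq_update = 10000;
            exit_idle_old(jiffies);
            printf("old: next per-rq update at %lu (two windows away)\n", rq_update);

            rq_update = 10000;
            exit_idle_new(jiffies);
            printf("new: next per-rq update at %lu (next window)\n", rq_update);
            return 0;
    }

With the per-rq update at 10000, the global window at 10500 and jiffies at 10400, the old path ends up at 15500 and skips the 10500 window entirely, while the fixed path resynchronises to 10500, matching the patch below.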
Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Mike Galbraith Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Fixes: commit 5167e8d ("sched/nohz: Rewrite and fix load-avg computation -- again") Link: http://lkml.kernel.org/r/20170217120731.11868-2-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a2d6eb7..ec91fcc 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -201,8 +201,9 @@ void calc_load_exit_idle(void) struct rq *this_rq = this_rq(); /* - * If we're still before the sample window, we're done. + * If we're still before the pending sample window, we're done. */ + this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update)) return; @@ -211,7 +212,6 @@ void calc_load_exit_idle(void) * accounted through the nohz accounting, so skip the entire deal and * sync up for the next window. */ - this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update + 10)) this_rq->calc_load_update += LOAD_FREQ; } -- cgit v0.10.2 From c52829f60f5f6e228a70162717df199e874898a8 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 27 Jan 2017 00:21:53 +0800 Subject: spi: When no dma_chan map buffers with spi_master's parent commit 88b0aa544af58ce3be125a1845a227264ec9ab89 upstream. Back before commit 1dccb598df54 ("arm64: simplify dma_get_ops"), for arm64, devices for which dma_ops were not explicitly set were automatically configured to use swiotlb_dma_ops, since this was hard-coded as the global "dma_ops" in arm64_dma_init(). Now that global "dma_ops" has been removed, all devices much have their dma_ops explicitly set by a call to arch_setup_dma_ops(), otherwise the device is assigned dummy_dma_ops, and thus calls to map_sg for such a device will fail (return 0). Mediatek SPI uses DMA but does not use a dma channel. Support for this was added by commit c37f45b5f1cd ("spi: support spi without dma channel to use can_dma()"), which uses the master_spi dev to DMA map buffers. The master_spi device is not a platform device, rather it is created in spi_alloc_device(), and therefore its dma_ops are never set. Therefore, when the mediatek SPI driver when it does DMA (for large SPI transactions > 32 bytes), SPI will use spi_map_buf()->dma_map_sg() to map the buffer for use in DMA. But dma_map_sg()->dma_map_sg_attrs() returns 0, because ops->map_sg is dummy_dma_ops->__dummy_map_sg, and hence spi_map_buf() returns -ENOMEM (-12). Fix this by using the real spi_master's parent device which should be a real physical device with DMA properties. 
Signed-off-by: Daniel Kurtz Fixes: c37f45b5f1cd ("spi: support spi without dma channel to use can_dma()") Cc: Leilk Liu Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 24d4492..ddc39b4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -797,12 +797,12 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) if (master->dma_tx) tx_dev = master->dma_tx->device->dev; else - tx_dev = &master->dev; + tx_dev = master->dev.parent; if (master->dma_rx) rx_dev = master->dma_rx->device->dev; else - rx_dev = &master->dev; + rx_dev = master->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (!master->can_dma(master, msg->spi, xfer)) @@ -844,12 +844,12 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) if (master->dma_tx) tx_dev = master->dma_tx->device->dev; else - tx_dev = &master->dev; + tx_dev = master->dev.parent; if (master->dma_rx) rx_dev = master->dma_rx->device->dev; else - rx_dev = &master->dev; + rx_dev = master->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (!master->can_dma(master, msg->spi, xfer)) -- cgit v0.10.2 From 9846c67974d6af64f665707bb4f68ae458684faa Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 30 Jan 2017 17:47:05 +0100 Subject: spi: fix device-node leaks commit 8324147f38019865b29d03baf28412d2ec0bd828 upstream. Make sure to release the device-node reference taken in of_register_spi_device() on errors and when deregistering the device. Fixes: 284b01897340 ("spi: Add OF binding support for SPI busses") Signed-off-by: Johan Hovold Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ddc39b4..6db8063 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -621,8 +621,10 @@ void spi_unregister_device(struct spi_device *spi) if (!spi) return; - if (spi->dev.of_node) + if (spi->dev.of_node) { of_node_clear_flag(spi->dev.of_node, OF_POPULATED); + of_node_put(spi->dev.of_node); + } if (ACPI_COMPANION(&spi->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); device_unregister(&spi->dev); @@ -1589,11 +1591,13 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc) if (rc) { dev_err(&master->dev, "spi_device register error %s\n", nc->full_name); - goto err_out; + goto err_of_node_put; } return spi; +err_of_node_put: + of_node_put(nc); err_out: spi_dev_put(spi); return ERR_PTR(rc); -- cgit v0.10.2 From 88baad2e715967f237396bea47c496830d82a9c2 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Fri, 10 Feb 2017 11:55:46 -0600 Subject: regulator: tps65086: Fix expected switch DT node names commit 1c47f7c316de38c30b481e1886cc6352c9efdcc1 upstream. The three load switches are called SWA1, SWB1, and SWB2. The node names describing properties for these are expected to be the same, but due to a typo they are not. Fix this here. Fixes: d2a2e729a666 ("regulator: tps65086: Add regulator driver for the TPS65086 PMIC") Reported-by: Steven Kipisz Signed-off-by: Andrew F. 
Davis Tested-by: Steven Kipisz Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c index caf174f..0e75f57 100644 --- a/drivers/regulator/tps65086-regulator.c +++ b/drivers/regulator/tps65086-regulator.c @@ -156,8 +156,8 @@ static struct tps65086_regulator regulators[] = { VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0), tps65086_ldoa23_ranges, 0, 0), TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)), - TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)), - TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)), + TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)), + TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)), TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)), }; -- cgit v0.10.2 From e57aa416ca4ce2af2570f3b776d738c04d9a8e3e Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Fri, 10 Feb 2017 11:55:47 -0600 Subject: regulator: tps65086: Fix DT node referencing in of_parse_cb commit 6308f1787fb85bc98b7241a08a9f7f33b47f8b61 upstream. When we check for additional DT properties in the current node we use the device_node passed in with the configuration data, this will not point to the correct DT node, use the one passed in for this purpose. Fixes: d2a2e729a666 ("regulator: tps65086: Add regulator driver for the TPS65086 PMIC") Reported-by: Steven Kipisz Signed-off-by: Andrew F. Davis Tested-by: Steven Kipisz Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c index 0e75f57..6dbf3cf 100644 --- a/drivers/regulator/tps65086-regulator.c +++ b/drivers/regulator/tps65086-regulator.c @@ -161,14 +161,14 @@ static struct tps65086_regulator regulators[] = { TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)), }; -static int tps65086_of_parse_cb(struct device_node *dev, +static int tps65086_of_parse_cb(struct device_node *node, const struct regulator_desc *desc, struct regulator_config *config) { int ret; /* Check for 25mV step mode */ - if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) { + if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) { switch (desc->id) { case BUCK1: case BUCK2: @@ -192,7 +192,7 @@ static int tps65086_of_parse_cb(struct device_node *dev, } /* Check for decay mode */ - if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) { + if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) { ret = regmap_write_bits(config->regmap, regulators[desc->id].decay_reg, regulators[desc->id].decay_mask, -- cgit v0.10.2 From 07bb2c7e7ea369f03a8893e445639324726680a5 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Thu, 30 Mar 2017 14:58:18 -0500 Subject: ARM: OMAP2+: omap_device: Sync omap_device and pm_runtime after probe defer commit 04abaf07f6d5cdf22b7a478a86e706dfeeeef960 upstream. Starting from commit 5de85b9d57ab ("PM / runtime: Re-init runtime PM states at probe error and driver unbind") pm_runtime core now changes device runtime_status back to after RPM_SUSPENDED after a probe defer. Certain OMAP devices make use of "ti,no-idle-on-init" flag which causes omap_device_enable to be called during the BUS_NOTIFY_ADD_DEVICE event during probe, along with pm_runtime_set_active. This call to pm_runtime_set_active typically will prevent a call to pm_runtime_get in a driver probe function from re-enabling the omap_device. 
However, in the case of a probe defer that happens before the driver probe function is able to run, such as a missing pinctrl states defer, pm_runtime_reinit will set the device as RPM_SUSPENDED and then once driver probe is actually able to run, pm_runtime_get will see the device as suspended and call through to the omap_device layer, attempting to enable the already enabled omap_device and causing errors like this: omap-gpmc 50000000.gpmc: omap_device: omap_device_enable() called from invalid state 1 omap-gpmc 50000000.gpmc: use pm_runtime_put_sync_suspend() in driver? We can avoid this error by making sure the pm_runtime status of a device matches the omap_device state before a probe attempt. By extending the omap_device bus notifier to act on the BUS_NOTIFY_BIND_DRIVER event we can check if a device is enabled in omap_device but with a pm_runtime status of RPM_SUSPENDED and once again mark the device as RPM_ACTIVE to avoid a second incorrect call to omap_device_enable. Fixes: 5de85b9d57ab ("PM / runtime: Re-init runtime PM states at probe error and driver unbind") Tested-by: Franklin S Cooper Jr. Signed-off-by: Dave Gerlach Signed-off-by: Tony Lindgren Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index e920dd8..f989145 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb, dev_err(dev, "failed to idle\n"); } break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && + pm_runtime_status_suspended(dev)) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; + pm_runtime_set_active(dev); + } + break; case BUS_NOTIFY_ADD_DEVICE: if (pdev->dev.of_node) omap_device_build_from_dt(pdev); -- cgit v0.10.2 From 4efe34b500a740016e5eabb8114ceeb395af771e Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Mon, 6 Mar 2017 12:56:55 -0600 Subject: ARM: dts: OMAP3: Fix MFG ID EEPROM commit 06e1a5cc570703796ff1bd3a712e8e3b15c6bb0d upstream. The manufacturing information is stored in the EEPROM. This chip is an AT24C64 not not (nor has it ever been) 24C02. This patch will correctly address the EEPROM to read the entire contents and not just 256 bytes (of 0xff). Fixes: 5e3447a29a38 ("ARM: dts: LogicPD Torpedo: Add AT24 EEPROM Support") Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 8f9a69c..efe5399 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -121,7 +121,7 @@ &i2c3 { clock-frequency = <400000>; at24@50 { - compatible = "at24,24c02"; + compatible = "atmel,24c64"; readonly; reg = <0x50>; }; -- cgit v0.10.2 From 7661b19687b2399783de2c00cf88981c93bc8383 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 26 May 2017 17:40:02 +0100 Subject: ARM64/ACPI: Fix BAD_MADT_GICC_ENTRY() macro implementation commit cb7cf772d83d2d4e6995c5bb9e0fb59aea8f7080 upstream. The BAD_MADT_GICC_ENTRY() macro checks if a GICC MADT entry passes muster from an ACPI specification standpoint. 
Current macro detects the MADT GICC entry length through ACPI firmware version (it changed from 76 to 80 bytes in the transition from ACPI 5.1 to ACPI 6.0 specification) but always uses (erroneously) the ACPICA (latest) struct (ie struct acpi_madt_generic_interrupt - that is 80-bytes long) length to check if the current GICC entry memory record exceeds the MADT table end in memory as defined by the MADT table header itself, which may result in false negatives depending on the ACPI firmware version and how the MADT entries are laid out in memory (ie on ACPI 5.1 firmware MADT GICC entries are 76 bytes long, so by adding 80 to a GICC entry start address in memory the resulting address may well be past the actual MADT end, triggering a false negative). Fix the BAD_MADT_GICC_ENTRY() macro by reshuffling the condition checks and update them to always use the firmware version specific MADT GICC entry length in order to carry out boundary checks. Fixes: b6cfb277378e ("ACPI / ARM64: add BAD_MADT_GICC_ENTRY() macro") Reported-by: Julien Grall Acked-by: Will Deacon Acked-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Cc: Julien Grall Cc: Hanjun Guo Cc: Al Stone Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index e517088..de04879 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -22,9 +22,9 @@ #define ACPI_MADT_GICC_LENGTH \ (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) -#define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ - (entry)->header.length != ACPI_MADT_GICC_LENGTH) +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI -- cgit v0.10.2 From a2c222bef08f1ada42f85f12114f482a0682ea56 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Thu, 29 Jun 2017 18:41:36 +0100 Subject: ARM: 8685/1: ensure memblock-limit is pmd-aligned commit 9e25ebfe56ece7541cd10a20d715cbdd148a2e06 upstream. The pmd containing memblock_limit is cleared by prepare_page_table() which creates the opportunity for early_alloc() to allocate unmapped memory if memblock_limit is not pmd aligned causing a boot-time hang. Commit 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM") attempted to resolve this problem, but there is a path through the adjust_lowmem_bounds() routine where if all memory regions start and end on pmd-aligned addresses the memblock_limit will be set to arm_lowmem_limit. Since arm_lowmem_limit can be affected by the vmalloc early parameter, the value of arm_lowmem_limit may not be pmd-aligned. This commit corrects this oversight such that memblock_limit is always rounded down to pmd-alignment. Fixes: 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM") Signed-off-by: Doug Berger Suggested-by: Mark Rutland Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5cbfd9f..f7c7413 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1211,15 +1211,15 @@ void __init adjust_lowmem_bounds(void) high_memory = __va(arm_lowmem_limit - 1) + 1; + if (!memblock_limit) + memblock_limit = arm_lowmem_limit; + /* * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the * last full pmd, which should be mapped. 
*/ - if (memblock_limit) - memblock_limit = round_down(memblock_limit, PMD_SIZE); - if (!memblock_limit) - memblock_limit = arm_lowmem_limit; + memblock_limit = round_down(memblock_limit, PMD_SIZE); if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { if (memblock_end_of_DRAM() > arm_lowmem_limit) { -- cgit v0.10.2 From 15541e64163c0c5a2d2e3e8d1b73057888170f62 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 24 Apr 2017 11:58:54 -0300 Subject: tools arch: Sync arch/x86/lib/memcpy_64.S with the kernel commit e883d09c9eb2ffddfd057c17e6a0cef446ec8c9b upstream. Just a minor fix done in: Fixes: 26a37ab319a2 ("x86/mce: Fix copy/paste error in exception table entries") Cc: Tony Luck Link: http://lkml.kernel.org/n/tip-ni9jzdd5yxlail6pq8cuexw2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 49e6eba..98dcc11 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -286,7 +286,7 @@ ENDPROC(memcpy_mcsafe_unrolled) _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) -- cgit v0.10.2 From b287ade87c9192b4ae6fe525eaa66fd25455bfb1 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 27 Jun 2017 20:39:06 +0800 Subject: x86/boot/KASLR: Fix kexec crash due to 'virt_addr' calculation bug commit 8eabf42ae5237e6b699aeac687b5b629e3537c8d upstream. Kernel text KASLR is separated into physical address and virtual address randomization. And for virtual address randomization, we only randomize to get an offset between 16M and KERNEL_IMAGE_SIZE. So the initial value of 'virt_addr' should be LOAD_PHYSICAL_ADDR, but not the original kernel loading address 'output'. The bug will cause kernel boot failure if kernel is loaded at a different position than the address, 16M, which is decided at compile time. Kexec/kdump is one such practical case. To fix it, just assign LOAD_PHYSICAL_ADDR to virt_addr as initial value. Tested-by: Dave Young Signed-off-by: Baoquan He Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 8391c73 ("x86/KASLR: Randomize virtual address separately") Link: http://lkml.kernel.org/r/1498567146-11990-3-git-send-email-bhe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index a66854d..6de58f1 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -430,9 +430,6 @@ void choose_random_location(unsigned long input, { unsigned long random_addr, min_addr; - /* By default, keep output position unchanged.
*/ - *virt_addr = *output; - if (cmdline_find_option_bool("nokaslr")) { warn("KASLR disabled: 'nokaslr' on cmdline."); return; diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b3c5a5f0..c945acd 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, unsigned long output_len) { const unsigned long kernel_total_size = VO__end - VO__text; - unsigned long virt_addr = (unsigned long)output; + unsigned long virt_addr = LOAD_PHYSICAL_ADDR; /* Retain x86 boot parameters pointer passed from startup_32/64. */ boot_params = rmode; @@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, #ifndef CONFIG_RELOCATABLE if ((unsigned long)output != LOAD_PHYSICAL_ADDR) error("Destination address does not match LOAD_PHYSICAL_ADDR"); - if ((unsigned long)output != virt_addr) + if (virt_addr != LOAD_PHYSICAL_ADDR) error("Destination virtual address changed when not relocatable"); #endif diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 1c8355e..766a521 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input, unsigned long output_size, unsigned long *virt_addr) { - /* No change from existing output location. */ - *virt_addr = *output; } #endif -- cgit v0.10.2 From 3667dafd6c04b46a827398b62fa97b9cf73d32f5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 6 Apr 2017 16:19:22 +0200 Subject: x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space commit 5ed386ec09a5d75bcf073967e55e895c2607a5c3 upstream. When this function fails it just sends a SIGSEGV signal to user-space using force_sig(). This signal is missing essential information about the cause, e.g. the trap_nr or an error code. Fix this by propagating the error to the only caller of mpx_handle_bd_fault(), do_bounds(), which sends the correct SIGSEGV signal to the process. Signed-off-by: Joerg Roedel Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: fe3d197f84319 ('x86, mpx: On-demand kernel allocation of bounds tables') Link: http://lkml.kernel.org/r/1491488362-27198-1-git-send-email-joro@8bytes.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 3e7c489..a75103e 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void) if (!kernel_managing_mpx_tables(current->mm)) return -EINVAL; - if (do_mpx_bt_fault()) { - force_sig(SIGSEGV, current); - /* - * The force_sig() is essentially "handling" this - * exception, so we do not pass up the error - * from do_mpx_bt_fault(). - */ - } - return 0; + return do_mpx_bt_fault(); } /* -- cgit v0.10.2 From 8af88a950b4207f589b210657edc7c94b86b48e8 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 22 Apr 2017 00:01:22 -0700 Subject: x86/mm: Fix flush_tlb_page() on Xen commit dbd68d8e84c606673ebbcf15862f8c155fa92326 upstream. flush_tlb_page() passes a bogus range to flush_tlb_others() and expects the latter to fix it up. native_flush_tlb_others() has the fixup but Xen's version doesn't. Move the fixup to flush_tlb_others(). 
AFAICS the only real effect is that, without this fix, Xen would flush everything instead of just the one page on remote vCPUs in when flush_tlb_page() was called. Signed-off-by: Andy Lutomirski Reviewed-by: Boris Ostrovsky Cc: Andrew Morton Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Michal Hocko Cc: Nadav Amit Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Fixes: e7b52ffd45a6 ("x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range") Link: http://lkml.kernel.org/r/10ed0e4dfea64daef10b87fb85df1746999b4dba.1492844372.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index a7655f6..75fb011 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -263,8 +263,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, { struct flush_tlb_info info; - if (end == 0) - end = start + PAGE_SIZE; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; @@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); preempt_enable(); } -- cgit v0.10.2 From d5c5e8ba5d9d7b3378cf08274c86c8a340110b05 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Wed, 3 May 2017 14:51:41 -0700 Subject: ocfs2: o2hb: revert hb threshold to keep compatible commit 33496c3c3d7b88dcbe5e55aa01288b05646c6aca upstream. Configfs is the interface for ocfs2-tools to set configure to kernel and $configfs_dir/cluster/$clustername/heartbeat/dead_threshold is the one used to configure heartbeat dead threshold. Kernel has a default value of it but user can set O2CB_HEARTBEAT_THRESHOLD in /etc/sysconfig/o2cb to override it. Commit 45b997737a80 ("ocfs2/cluster: use per-attribute show and store methods") changed heartbeat dead threshold name while ocfs2-tools did not, so ocfs2-tools won't set this configurable and the default value is always used. So revert it. 
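The rename matters because configfs derives both the user-visible file name and the show/store callbacks from the token passed to CONFIGFS_ATTR() by token pasting. A rough sketch of what CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold) expands to (approximate; see include/linux/configfs.h for the exact macro):

static struct configfs_attribute o2hb_heartbeat_group_attr_dead_threshold = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "dead_threshold",	/* the file name ocfs2-tools writes to */
	.ca_mode	= S_IRUGO | S_IWUSR,
	.show		= o2hb_heartbeat_group_dead_threshold_show,
	.store		= o2hb_heartbeat_group_dead_threshold_store,
};

Renaming the attribute identifier (and the paired _show/_store functions) from threshold back to dead_threshold is therefore exactly what restores the $configfs_dir/cluster/$clustername/heartbeat/dead_threshold path that the O2CB_HEARTBEAT_THRESHOLD handling expects.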
Fixes: 45b997737a80 ("ocfs2/cluster: use per-attribute show and store methods") Link: http://lkml.kernel.org/r/1490665245-15374-1-git-send-email-junxiao.bi@oracle.com Signed-off-by: Junxiao Bi Acked-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 636abcb..5e8709a 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2242,13 +2242,13 @@ unlock: spin_unlock(&o2hb_live_lock); } -static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", o2hb_dead_threshold); } -static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item, const char *page, size_t count) { unsigned long tmp; @@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item, } -CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold); +CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold); CONFIGFS_ATTR(o2hb_heartbeat_group_, mode); static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { - &o2hb_heartbeat_group_attr_threshold, + &o2hb_heartbeat_group_attr_dead_threshold, &o2hb_heartbeat_group_attr_mode, NULL, }; -- cgit v0.10.2 From c19bfc6765d44847a3880333474e2c992d63802f Mon Sep 17 00:00:00 2001 From: David Dillow Date: Mon, 30 Jan 2017 19:11:11 -0800 Subject: iommu/vt-d: Don't over-free page table directories commit f7116e115acdd74bc75a4daf6492b11d43505125 upstream. dma_pte_free_level() recurses down the IOMMU page tables and frees directory pages that are entirely contained in the given PFN range. Unfortunately, it incorrectly calculates the starting address covered by the PTE under consideration, which can lead to it clearing an entry that is still in use. This occurs if we have a scatterlist with an entry that has a length greater than 1026 MB and is aligned to 2 MB for both the IOMMU and physical addresses. For example, if __domain_mapping() is asked to map a two-entry scatterlist with 2 MB and 1028 MB segments to PFN 0xffff80000, then when dma_pte_free_pagetable() is later asked to free PFNs from 0xffff80200 to 0xffffc05ff, it will also incorrectly clear the PFNs from 0xffff80000 to 0xffff801ff because of this issue. The current code will set level_pfn to 0xffff80200, and 0xffff80200-0xffffc01ff fits inside the range being cleared. Properly setting the level_pfn for the current level under consideration catches that this PTE is outside of the range being cleared. This patch also changes the value passed into dma_pte_free_level() when it recurses. This only affects the first PTE of the range being cleared, and is handled by the existing code that ensures we start our cursor no lower than start_pfn. This was found when using dma_map_sg() to map large chunks of contiguous memory, which immediately led to faults on the first access of the erroneously-deleted mappings.
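To make the level_pfn arithmetic concrete: each page-table level in intel-iommu.c covers 9 more bits of PFN, so the mask for a level is roughly -1UL << (9 * (level - 1)). A standalone sketch of the numbers from the example above (this assumes that 9-bit-per-level layout and is only an illustration, not the driver code):

#include <stdio.h>

/* Assumed to mirror intel-iommu.c's LEVEL_STRIDE == 9. */
static unsigned long level_mask(int level)
{
	return -1UL << (9 * (level - 1));
}

int main(void)
{
	unsigned long pfn = 0xffff80200;	/* start_pfn of the range being freed */
	int level = 3;				/* a level-3 PTE covers 1 GB with 4K pages */

	/* Old code masked with the level below, i.e. 2 MB granularity. */
	printf("buggy level_pfn: 0x%lx\n", pfn & level_mask(level - 1)); /* 0xffff80200 */
	/* Fixed code masks with the current level, i.e. 1 GB granularity. */
	printf("fixed level_pfn: 0x%lx\n", pfn & level_mask(level));     /* 0xffff80000 */
	return 0;
}

With the fixed value, the directory under this PTE is seen to start at 0xffff80000, before start_pfn (0xffff80200), so it is not entirely contained in the range being cleared and is left alone instead of being freed.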
Fixes: 3269ee0bd668 ("intel-iommu: Fix leaks in pagetable freeing") Reviewed-by: Benjamin Serebrin Signed-off-by: David Dillow Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 87fcbf7..002f8a4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1144,7 +1144,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; - level_pfn = pfn & level_mask(level - 1); + level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) -- cgit v0.10.2 From d7fcb303d1ee4416a6e4772735cfacc36e86bff7 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 16 Jan 2017 12:58:07 +0000 Subject: iommu: Handle default domain attach failure commit 797a8b4d768c58caac58ee3e8cb36a164d1b7751 upstream. We wouldn't normally expect ops->attach_dev() to fail, but on IOMMUs with limited hardware resources, or generally misconfigured systems, it is certainly possible. We report failure correctly from the external iommu_attach_device() interface, but do not do so in iommu_group_add() when attaching to the default domain. The result of failure there is that the device, group and domain all get left in a broken, part-configured state which leads to weird errors and misbehaviour down the line when IOMMU API calls sort-of-but-don't-quite work. Check the return value of __iommu_attach_device() on the default domain, and refactor the error handling paths to cope with its failure and clean up correctly in such cases. Fixes: e39cb8a3aa98 ("iommu: Make sure a device is always attached to a domain") Reported-by: Punit Agrawal Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9a2f196..87d3060 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -383,36 +383,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); - if (ret) { - kfree(device); - return ret; - } + if (ret) + goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return -ENOMEM; + ret = -ENOMEM; + goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { - kfree(device->name); if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. */ + kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } - - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return ret; + goto err_free_name; } kobject_get(group->devices_kobj); @@ -424,8 +418,10 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); if (group->domain) - __iommu_attach_device(group->domain, dev); + ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); + if (ret) + goto err_put_group; /* Notify any listeners about change to group. 
*/ blocking_notifier_call_chain(&group->notifier, @@ -436,6 +432,21 @@ rename: pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); return 0; + +err_put_group: + mutex_lock(&group->mutex); + list_del(&device->list); + mutex_unlock(&group->mutex); + dev->iommu_group = NULL; + kobject_put(group->devices_kobj); +err_free_name: + kfree(device->name); +err_remove_link: + sysfs_remove_link(&dev->kobj, "iommu_group"); +err_free_device: + kfree(device); + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); + return ret; } EXPORT_SYMBOL_GPL(iommu_group_add_device); -- cgit v0.10.2 From f0c31c674abdf563d2ad5d9ecfcad8d237f939f0 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 16 Mar 2017 17:00:17 +0000 Subject: iommu/dma: Don't reserve PCI I/O windows commit 938f1bbe35e3a7cb07e1fa7c512e2ef8bb866bdf upstream. Even if a host controller's CPU-side MMIO windows into PCI I/O space do happen to leak into PCI memory space such that it might treat them as peer addresses, trying to reserve the corresponding I/O space addresses doesn't do anything to help solve that problem. Stop doing a silly thing. Fixes: fade1ec055dc ("iommu/dma: Avoid PCI host bridge windows") Reviewed-by: Eric Auger Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index c5ab866..1520e7f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -112,8 +112,7 @@ static void iova_reserve_pci_windows(struct pci_dev *dev, unsigned long lo, hi; resource_list_for_each_entry(window, &bridge->windows) { - if (resource_type(window->res) != IORESOURCE_MEM && - resource_type(window->res) != IORESOURCE_IO) + if (resource_type(window->res) != IORESOURCE_MEM) continue; lo = iova_pfn(iovad, window->res->start - window->offset); -- cgit v0.10.2 From 0e55856b8f2918f3a6b8caf3c72867ee88f816dd Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sun, 23 Apr 2017 18:23:21 +0800 Subject: iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid() commit 73dbd4a4230216b6a5540a362edceae0c9b4876b upstream. In function amd_iommu_bind_pasid(), the control flow jumps to label out_free when pasid_state->mm and mm is NULL. And mmput(mm) is called. In function mmput(mm), mm is referenced without validation. This will result in a NULL dereference bug. This patch fixes the bug. Signed-off-by: Pan Bian Fixes: f0aac63b873b ('iommu/amd: Don't hold a reference to mm_struct') Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index f8ed8c9..a0b4ac6 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -695,9 +695,9 @@ out_clear_state: out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: -- cgit v0.10.2 From 1781a29b31faee2cae9e7f353d8ab99ceb619c15 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Mon, 26 Jun 2017 04:28:04 -0500 Subject: iommu/amd: Fix interrupt remapping when disable guest_mode commit 84a21dbdef0b96d773599c33c2afbb002198d303 upstream. Pass-through devices to VM guest can get updated IRQ affinity information via irq_set_affinity() when not running in guest mode. Currently, AMD IOMMU driver in GA mode ignores the updated information if the pass-through device is setup to use vAPIC regardless of guest_mode. This could cause invalid interrupt remapping. 
Also, the guest_mode bit should be set and cleared only when SVM updates posted-interrupt interrupt remapping information. Signed-off-by: Suravee Suthikulpanit Cc: Joerg Roedel Fixes: d98de49a53e48 ('iommu/amd: Enable vAPIC interrupt remapping mode by default') Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 11a13b5..41800b6 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3857,11 +3857,9 @@ static void irte_ga_prepare(void *entry, u8 vector, u32 dest_apicid, int devid) { struct irte_ga *irte = (struct irte_ga *) entry; - struct iommu_dev_data *dev_data = search_dev_data(devid); irte->lo.val = 0; irte->hi.val = 0; - irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0; irte->lo.fields_remap.int_type = delivery_mode; irte->lo.fields_remap.dm = dest_mode; irte->hi.fields.vector = vector; @@ -3917,10 +3915,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index, struct irte_ga *irte = (struct irte_ga *) entry; struct iommu_dev_data *dev_data = search_dev_data(devid); - if (!dev_data || !dev_data->use_vapic) { + if (!dev_data || !dev_data->use_vapic || + !irte->lo.fields_remap.guest_mode) { irte->hi.fields.vector = vector; irte->lo.fields_remap.destination = dest_apicid; - irte->lo.fields_remap.guest_mode = 0; modify_irte_ga(devid, index, irte, NULL); } } -- cgit v0.10.2 From 8a6f400a374c2366ae2e0a3e528a2c9791b1dcd1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Feb 2017 16:19:06 +0300 Subject: cpufreq: s3c2416: double free on driver init error path commit a69261e4470d680185a15f748d9cdafb37c57a33 upstream. The "goto err_armclk;" error path already does a clk_put(s3c_freq->hclk); so this is a double free. Fixes: 34ee55075265 ([CPUFREQ] Add S3C2416/S3C2450 cpufreq driver) Signed-off-by: Dan Carpenter Reviewed-by: Krzysztof Kozlowski Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index d6d4257..5b2db3c 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) rate = clk_get_rate(s3c_freq->hclk); if (rate < 133 * 1000 * 1000) { pr_err("cpufreq: HCLK not at 133MHz\n"); - clk_put(s3c_freq->hclk); ret = -EINVAL; goto err_armclk; } -- cgit v0.10.2 From 92e66676523a9f921dfaa383e37d3a4e2edf15df Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Fri, 6 Jan 2017 12:34:30 +0000 Subject: clk: scpi: don't add cpufreq device if the scpi dvfs node is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 67bcc2c5f1da8c5bb58e72354274ea5c59a3950a upstream. Currently we add the virtual cpufreq device unconditionally even when the SCPI DVFS clock provider node is disabled. This will cause the cpufreq driver to throw errors when it gets initialised on boot/modprobe and also when the CPUs are hot-plugged back in. This patch fixes the issue by adding the virtual cpufreq device only if the SCPI DVFS clock provider is available and registered.
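For the s3c2416 double free above, the point of the goto-based unwind is that each label releases only what was acquired before the jump, so an explicit clk_put() at the error site releases the clock a second time on the way out. A reduced sketch of the pattern (hypothetical function and variable names, not the actual driver):

static int example_init(void)
{
	struct clk *hclk, *armclk;
	int ret;

	hclk = clk_get(NULL, "hclk");
	if (IS_ERR(hclk))
		return PTR_ERR(hclk);

	armclk = clk_get(NULL, "armclk");
	if (IS_ERR(armclk)) {
		ret = PTR_ERR(armclk);
		goto err_hclk;
	}

	if (clk_get_rate(hclk) < 133 * 1000 * 1000) {
		ret = -EINVAL;
		/* Adding clk_put(hclk) here would be the bug: the unwind
		 * below already releases hclk when we jump to err_armclk. */
		goto err_armclk;
	}

	return 0;

err_armclk:
	clk_put(armclk);
err_hclk:
	clk_put(hclk);
	return ret;
}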
Fixes: 9490f01e2471 ("clk: scpi: add support for cpufreq virtual device") Reported-by: Michał Zegan Cc: Neil Armstrong Signed-off-by: Sudeep Holla Tested-by: Michał Zegan Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 2a3e9d8..96d3717 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev) of_node_put(child); return ret; } - } - /* Add the virtual cpufreq device */ - cpufreq_dev = platform_device_register_simple("scpi-cpufreq", - -1, NULL, 0); - if (IS_ERR(cpufreq_dev)) - pr_warn("unable to register cpufreq device"); + if (match->data != &scpi_dvfs_ops) + continue; + /* Add the virtual cpufreq device if it's DVFS clock provider */ + cpufreq_dev = platform_device_register_simple("scpi-cpufreq", + -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + pr_warn("unable to register cpufreq device"); + } return 0; } -- cgit v0.10.2 From 3e51ccbadd15aa4a0e0a64535ec0566749361938 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 2 Mar 2017 16:57:23 -0600 Subject: objtool: Fix another GCC jump table detection issue commit 5c51f4ae84df0f9df33ac08aa5be50061a8b4242 upstream. Arnd Bergmann reported a (false positive) objtool warning: drivers/infiniband/sw/rxe/rxe_resp.o: warning: objtool: rxe_responder()+0xfe: sibling call from callable instruction with changed frame pointer The issue is in find_switch_table(). It tries to find a switch statement's jump table by walking backwards from an indirect jump instruction, looking for a relocation to the .rodata section. In this case it stopped walking prematurely: the first .rodata relocation it encountered was for a variable (resp_state_name) instead of a jump table, so it just assumed there wasn't a jump table. The fix is to ignore any .rodata relocation which refers to an ELF object symbol. This works because the jump tables are anonymous and have no symbols associated with them. Reported-by: Arnd Bergmann Tested-by: Arnd Bergmann Signed-off-by: Josh Poimboeuf Cc: Denys Vlasenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 3732710ff6f2 ("objtool: Improve rare switch jump table pattern detection") Link: http://lkml.kernel.org/r/20170302225723.3ndbsnl4hkqbne7a@treble Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index e8a1f69..b8dadb0 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -757,11 +757,20 @@ static struct rela *find_switch_table(struct objtool_file *file, insn->jump_dest->offset > orig_insn->offset)) break; + /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (text_rela && text_rela->sym == file->rodata->sym) - return find_rela_by_dest(file->rodata, - text_rela->addend); + if (!text_rela || text_rela->sym != file->rodata->sym) + continue; + + /* + * Make sure the .rodata address isn't associated with a + * symbol. gcc jump tables are anonymous data. 
+ */ + if (find_symbol_containing(file->rodata, text_rela->addend)) + continue; + + return find_rela_by_dest(file->rodata, text_rela->addend); } return NULL; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 0d7983a..d897702 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) return NULL; } +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) +{ + struct symbol *sym; + + list_for_each_entry(sym, &sec->symbol_list, list) + if (sym->type != STT_SECTION && + offset >= sym->offset && offset < sym->offset + sym->len) + return sym; + + return NULL; +} + struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len) { diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index aa1ff65..731973e 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -79,6 +79,7 @@ struct elf { struct elf *elf_open(const char *name); struct section *find_section_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len); -- cgit v0.10.2 From 65fc82cea84f38ce918553b557f3a24c8d8c9649 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 24 Mar 2017 23:02:48 +0100 Subject: infiniband: hns: avoid gcc-7.0.1 warning for uninitialized data commit 5b0ff9a00755d4d9c209033a77f1ed8f3186fe5c upstream. hns_roce_v1_cq_set_ci() calls roce_set_bit() on an uninitialized field, which will then change only a few of its bits, causing a warning with the latest gcc: infiniband/hw/hns/hns_roce_hw_v1.c: In function 'hns_roce_v1_cq_set_ci': infiniband/hw/hns/hns_roce_hw_v1.c:1854:23: error: 'doorbell[1]' is used uninitialized in this function [-Werror=uninitialized] roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); The code is actually correct since we always set all bits of the port_vlan field, but gcc correctly points out that the first access does contain uninitialized data. This initializes the field to zero first before setting the individual bits. Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver") Signed-off-by: Arnd Bergmann Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 71232e5..20ec347 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1267,6 +1267,7 @@ void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) u32 doorbell[2]; doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1); + doorbell[1] = 0; roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); -- cgit v0.10.2 From 8ee785016d5a05afa9ddd872ae7befa11798bfbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 4 Jan 2017 12:09:41 +0100 Subject: brcmfmac: avoid writing channel out of allocated array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 77c0d0cd10e793989d1e8b835a9a09694182cb39 upstream. 
Our code was assigning the number of channels to the index variable by default. If firmware reported a channel we didn't predict, this would result in using that initial index value and writing out of the array. This never happened so far (we got a complete list of supported channels) but it means possible memory corruption so we should handle it anyway. This patch simply detects an unexpected channel and ignores it. As we don't try to create a new entry now, it's also safe to drop hw_value and center_freq assignment. For known channels we have these set anyway. I decided to fix this issue by assigning NULL or a target channel to the channel variable. This was one of the possible ways; I preferred this one as it also avoids using channel[index] over and over. Fixes: 58de92d2f95e ("brcmfmac: use static superset of channels for wiphy bands") Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 78d9966..0f5dde1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5913,7 +5913,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 i, j; u32 total; u32 chaninfo; - u32 index; pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); @@ -5961,33 +5960,36 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw == BRCMU_CHAN_BW_80) continue; - channel = band->channels; - index = band->n_channels; + channel = NULL; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.control_ch_num) { - index = j; + if (band->channels[j].hw_value == ch.control_ch_num) { + channel = &band->channels[j]; break; } } - channel[index].center_freq = - ieee80211_channel_to_frequency(ch.control_ch_num, - band->band); - channel[index].hw_value = ch.control_ch_num; + if (!channel) { + /* It seems firmware supports some channel we never + * considered. Something new in IEEE standard? + */ + brcmf_err("Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); + continue; + } /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ if (ch.bw == BRCMU_CHAN_BW_80) { - channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ; + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; } else if (ch.bw == BRCMU_CHAN_BW_40) { - brcmf_update_bw40_channel_flag(&channel[index], &ch); + brcmf_update_bw40_channel_flag(channel, &ch); } else { /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. */ - channel[index].flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + channel->flags = IEEE80211_CHAN_NO_HT40 | + IEEE80211_CHAN_NO_80MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; @@ -5995,11 +5997,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) - channel[index].flags |= + channel->flags |= (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR); if (chaninfo & WL_CHAN_PASSIVE) - channel[index].flags |= + channel->flags |= IEEE80211_CHAN_NO_IR; } } -- cgit v0.10.2 From de5862335ed7c465b0900774fbd869bf91a23c58 Mon Sep 17 00:00:00 2001 From: Jaedon Shin Date: Fri, 3 Mar 2017 10:55:03 +0900 Subject: i2c: brcmstb: Fix START and STOP conditions commit 2de3ec4f1d4ba6ee380478055104eb918bd50cce upstream.
The BSC data buffers to send and receive data are each of size 32 bytes or 8 bytes 'xfersz' depending on SoC. The problem observed for all the combined message transfer was if length of data transfer was a multiple of 'xfersz' a repeated START was being transmitted by BSC driver. Fixed this by appropriately setting START/STOP conditions for such transfers. Fixes: dd1aa2524bc5 ("i2c: brcmstb: Add Broadcom settop SoC i2c controller driver") Signed-off-by: Jaedon Shin Acked-by: Kamal Dasu Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 0652281..78792b4 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, u8 *tmp_buf; int len = 0; int xfersz = brcmstb_i2c_get_xfersz(dev); + u32 cond, cond_per_msg; if (dev->is_suspended) return -EBUSY; @@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) - brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); + cond = ~COND_START_STOP; else - brcmstb_set_i2c_start_stop(dev, - COND_RESTART | COND_NOSTOP); + cond = COND_RESTART | COND_NOSTOP; + + brcmstb_set_i2c_start_stop(dev, cond); /* Send slave address */ if (!(pmsg->flags & I2C_M_NOSTART)) { @@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, } } + cond_per_msg = cond; + /* Perform data transfer */ while (len) { bytes_to_xfer = min(len, xfersz); - if (len <= xfersz && i == (num - 1)) - brcmstb_set_i2c_start_stop(dev, - ~(COND_START_STOP)); + if (len <= xfersz) { + if (i == (num - 1)) + cond_per_msg = cond_per_msg & + ~(COND_RESTART | COND_NOSTOP); + else + cond_per_msg = cond; + } else { + cond_per_msg = (cond_per_msg & ~COND_RESTART) | + COND_NOSTOP; + } + + brcmstb_set_i2c_start_stop(dev, cond_per_msg); rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, bytes_to_xfer, pmsg); @@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, len -= bytes_to_xfer; tmp_buf += bytes_to_xfer; + + cond_per_msg = COND_NOSTART | COND_NOSTOP; } } -- cgit v0.10.2 From a4bfcab30928b1ef1a19b379f8d08efe10853a42 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Fri, 3 Mar 2017 16:16:53 -0500 Subject: mtd: nand: brcmnand: Check flash #WP pin status before nand erase/program commit 9d2ee0a60b8bd9bef2a0082c533736d6a7b39873 upstream. On brcmnand controller v6.x and v7.x, the #WP pin is controlled through the NAND_WP bit in CS_SELECT register. The driver currently assumes that toggling the #WP pin is instantaneously enabling/disabling write-protection, but it actually takes some time to propagate the new state to the internal NAND chip logic. This behavior is sometime causing data corruptions when an erase/program operation is executed before write-protection has really been disabled. 
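The fix that follows handles this by polling the controller until both the controller-ready and flash-ready status bits settle before trusting, or changing, the #WP state. The deadline-polling idiom it relies on looks roughly like this (generic sketch with made-up names; the real helper, bcmnand_ctrl_poll_status(), is in the diff below):

static int poll_status(u32 mask, u32 expected, unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u32 val;

	do {
		val = read_status_reg();	/* hypothetical register read */
		if ((val & mask) == expected)
			return 0;
		cpu_relax();			/* busy-wait politely */
	} while (time_after(deadline, jiffies));

	return -ETIMEDOUT;
}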
Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller") Signed-off-by: Kamal Dasu Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 9d2424b..d9fab22 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -101,6 +101,9 @@ struct brcm_nand_dma_desc { #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) +#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) +#define NAND_POLL_STATUS_TIMEOUT_MS 100 + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -765,6 +768,31 @@ enum { CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), }; +static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, + u32 mask, u32 expected_val, + unsigned long timeout_ms) +{ + unsigned long limit; + u32 val; + + if (!timeout_ms) + timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS; + + limit = jiffies + msecs_to_jiffies(timeout_ms); + do { + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + + cpu_relax(); + } while (time_after(limit, jiffies)); + + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", + expected_val, val & mask); + + return -ETIMEDOUT; +} + static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) { u32 val = en ? CS_SELECT_NAND_WP : 0; @@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { static int old_wp = -1; + int ret; if (old_wp != wp) { dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); old_wp = wp; } + + /* + * make sure ctrl/flash ready before and after + * changing state of #WP pin + */ + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | + NAND_STATUS_READY, + NAND_CTRL_RDY | + NAND_STATUS_READY, 0); + if (ret) + return; + brcmnand_set_wp(ctrl, wp); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ + ret = bcmnand_ctrl_poll_status(ctrl, + NAND_CTRL_RDY | + NAND_STATUS_READY | + NAND_STATUS_WP, + NAND_CTRL_RDY | + NAND_STATUS_READY | + (wp ? 0 : NAND_STATUS_WP), 0); + + if (ret) + dev_err_ratelimited(&host->pdev->dev, + "nand #WP expected %s\n", + wp ? "on" : "off"); } } @@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data) static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) { struct brcmnand_controller *ctrl = host->ctrl; - u32 intfc; + int ret; dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; - intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); - WARN_ON(!(intfc & INTFC_CTLR_READY)); + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + WARN_ON(ret); mb(); /* flush previous writes */ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, -- cgit v0.10.2 From 982d8d92f25613e88f3a34a8a57da484f68d4c1d Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Fri, 24 Mar 2017 09:53:56 -0400 Subject: arm64: fix NULL dereference in have_cpu_die() commit 335d2c2d192266358c5dfa64953a4c162f46e464 upstream. Commit 5c492c3f5255 ("arm64: smp: Add function to determine if cpus are stuck in the kernel") added a helper function to determine if die() is supported in cpu_ops. 
This function assumes a cpu will have a valid cpu_ops entry, but that may not be the case for cpu0 if spin-table or parking protocol is used to boot secondary cpus. In that case, there is a NULL dereference if have_cpu_die() is called by cpu0. So add a check for a valid cpu_ops before dereferencing it. Fixes: 5c492c3f5255 ("arm64: smp: Add function to determine if cpus are stuck in the kernel") Signed-off-by: Mark Salter Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 8507703..a70f7d3 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -934,7 +934,7 @@ static bool have_cpu_die(void) #ifdef CONFIG_HOTPLUG_CPU int any_cpu = raw_smp_processor_id(); - if (cpu_ops[any_cpu]->cpu_die) + if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) return true; #endif return false; -- cgit v0.10.2 From 1eeb7942633225baad2f8465dd93a4fb72b4ec7f Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Tue, 25 Apr 2017 16:42:44 +0200 Subject: KVM: x86: fix emulation of RSM and IRET instructions commit 6ed071f051e12cf7baa1b69d3becb8f232fdfb7b upstream. On AMD, the effect of set_nmi_mask called by emulate_iret_real and em_rsm on hflags is reverted later on in x86_emulate_instruction where hflags are overwritten with ctxt->emul_flags (the kvm_set_hflags call). This manifests as a hang when rebooting Windows VMs with QEMU, OVMF, and >1 vcpu. Instead of trying to merge ctxt->emul_flags into vcpu->arch.hflags after an instruction is emulated, this commit deletes emul_flags altogether and makes the emulator access vcpu->arch.hflags using two new accessors. This way all changes, on the emulator side as well as in functions called from the emulator and accessing vcpu state with emul_to_vcpu, are preserved. More details on the bug and its manifestation with Windows and OVMF: It's a KVM bug in the interaction between SMI/SMM and NMI, specific to AMD. I believe that the SMM part explains why we started seeing this only with OVMF. KVM masks and unmasks NMI when entering and leaving SMM. When KVM emulates the RSM instruction in em_rsm, the set_nmi_mask call doesn't stick because later on in x86_emulate_instruction we overwrite arch.hflags with ctxt->emul_flags, effectively reverting the effect of the set_nmi_mask call. The AMD-specific hflag of interest here is HF_NMI_MASK. When rebooting the system, Windows sends an NMI IPI to all but the current cpu to shut them down. Only after all of them are parked in HLT will the initiating cpu finish the restart. If NMI is masked, other cpus never get the memo and the initiating cpu spins forever, waiting for hal!HalpInterruptProcessorsStarted to drop. That's the symptom we observe.
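Reduced to a few lines, the bug is a stale snapshot being written back over live state (illustrative userspace sketch, not KVM code; the flag value is made up):

#include <stdio.h>

#define HF_NMI_MASK (1u << 3)			/* stand-in for the real hflag bit */

static unsigned int hflags;			/* think vcpu->arch.hflags */

static void set_nmi_mask(int masked)		/* side effect performed mid-emulation */
{
	if (masked)
		hflags |= HF_NMI_MASK;
	else
		hflags &= ~HF_NMI_MASK;
}

int main(void)
{
	hflags = HF_NMI_MASK;			/* NMIs masked on entering SMM */

	unsigned int emul_flags = hflags;	/* buggy: snapshot taken up front */
	set_nmi_mask(0);			/* RSM emulation unmasks NMIs ... */
	hflags = emul_flags;			/* ... and the write-back reverts it */

	printf("NMI still masked: %s\n", (hflags & HF_NMI_MASK) ? "yes (the bug)" : "no");
	return 0;
}

The accessors added by the patch (get_hflags/set_hflags) make the emulator read and update the live vcpu->arch.hflags field instead of a snapshot, so the set_nmi_mask() effect survives.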
Fixes: a584539b24b8 ("KVM: x86: pass the whole hflags field to emulator and back") Signed-off-by: Ladi Prosek Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7be..19d14ac 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -221,6 +221,9 @@ struct x86_emulate_ops { void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); + + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -290,7 +293,6 @@ struct x86_emulate_ctxt { /* interruptibility state, as a result of execution of STI or MOV SS */ int interruptibility; - int emul_flags; bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 9f676ad..de36660 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2543,7 +2543,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) u64 smbase; int ret; - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); /* @@ -2592,11 +2592,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) return X86EMUL_UNHANDLEABLE; } - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt->ops->set_nmi_mask(ctxt, false); - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; - ctxt->emul_flags &= ~X86EMUL_SMM_MASK; + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); return X86EMUL_CONTINUE; } @@ -5312,6 +5312,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; + unsigned emul_flags; ctxt->mem_read.pos = 0; @@ -5326,6 +5327,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } + emul_flags = ctxt->ops->get_hflags(ctxt); if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || @@ -5359,7 +5361,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5388,7 +5390,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5442,7 +5444,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) special_insn: - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/x86.c 
b/arch/x86/kvm/x86.c index ab3f003..e1c1003 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5154,6 +5154,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); } +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) +{ + return emul_to_vcpu(ctxt)->arch.hflags; +} + +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) +{ + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); +} + static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, @@ -5193,6 +5203,8 @@ static const struct x86_emulate_ops emulate_ops = { .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, .set_nmi_mask = emulator_set_nmi_mask, + .get_hflags = emulator_get_hflags, + .set_hflags = emulator_set_hflags, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -5245,7 +5257,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); - ctxt->emul_flags = vcpu->arch.hflags; init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; @@ -5636,8 +5647,6 @@ restart: unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - if (vcpu->arch.hflags != ctxt->emul_flags) - kvm_set_hflags(vcpu, ctxt->emul_flags); kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); -- cgit v0.10.2 From f3c3ec96e5fb40b453693421577d446b5b22fc52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 18 May 2017 19:37:31 +0200 Subject: KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 34b0dadbdf698f9b277a31b2747b625b9a75ea1f upstream. Static analysis noticed that pmu->nr_arch_gp_counters can be 32 (INTEL_PMC_MAX_GENERIC) and therefore cannot be used to shift 'int'. I didn't add BUILD_BUG_ON for it as we have a better checker. Reported-by: Dan Carpenter Fixes: 25462f7f5295 ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch") Reviewed-by: Paolo Bonzini Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 9d4a850..5ab4a36 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; -- cgit v0.10.2 From d1d3756f07da10505699d1d3a1227b5201da3ab8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 18 May 2017 19:37:30 +0200 Subject: KVM: x86: zero base3 of unusable segments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f0367ee1d64d27fa08be2407df5c125442e885e3 upstream. Static checker noticed that base3 could be used uninitialized if the segment was not present (useable). 
Random stack values probably would not pass VMCS entry checks. Reported-by: Dan Carpenter Fixes: 1aa366163b8b ("KVM: x86 emulator: consolidate segment accessors") Reviewed-by: Paolo Bonzini Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e1c1003..3dbcb09 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4999,6 +4999,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, if (var.unusable) { memset(desc, 0, sizeof(*desc)); + if (base3) + *base3 = 0; return false; } -- cgit v0.10.2 From a29fd27ca26832fe03341a7fec75ea3b4b86fb51 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 5 Jun 2017 05:19:09 -0700 Subject: KVM: nVMX: Fix exception injection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d4912215d1031e4fb3d1038d2e1857218dba0d0a upstream. WARNING: CPU: 3 PID: 2840 at arch/x86/kvm/vmx.c:10966 nested_vmx_vmexit+0xdcd/0xde0 [kvm_intel] CPU: 3 PID: 2840 Comm: qemu-system-x86 Tainted: G OE 4.12.0-rc3+ #23 RIP: 0010:nested_vmx_vmexit+0xdcd/0xde0 [kvm_intel] Call Trace: ? kvm_check_async_pf_completion+0xef/0x120 [kvm] ? rcu_read_lock_sched_held+0x79/0x80 vmx_queue_exception+0x104/0x160 [kvm_intel] ? vmx_queue_exception+0x104/0x160 [kvm_intel] kvm_arch_vcpu_ioctl_run+0x1171/0x1ce0 [kvm] ? kvm_arch_vcpu_load+0x47/0x240 [kvm] ? kvm_arch_vcpu_load+0x62/0x240 [kvm] kvm_vcpu_ioctl+0x384/0x7b0 [kvm] ? kvm_vcpu_ioctl+0x384/0x7b0 [kvm] ? __fget+0xf3/0x210 do_vfs_ioctl+0xa4/0x700 ? __fget+0x114/0x210 SyS_ioctl+0x79/0x90 do_syscall_64+0x81/0x220 entry_SYSCALL64_slow_path+0x25/0x25 This is triggered occasionally by running both win7 and win2016 in L2, in addition, EPT is disabled on both L1 and L2. It can't be reproduced easily. Commit 0b6ac343fc (KVM: nVMX: Correct handling of exception injection) mentioned that "KVM wants to inject page-faults which it got to the guest. This function assumes it is called with the exit reason in vmcs02 being a #PF exception". Commit e011c663 (KVM: nVMX: Check all exceptions for intercept during delivery to L2) allows to check all exceptions for intercept during delivery to L2. However, there is no guarantee the exit reason is exception currently, when there is an external interrupt occurred on host, maybe a time interrupt for host which should not be injected to guest, and somewhere queues an exception, then the function nested_vmx_check_exception() will be called and the vmexit emulation codes will try to emulate the "Acknowledge interrupt on exit" behavior, the warning is triggered. Reusing the exit reason from the L2->L0 vmexit is wrong in this case, the reason must always be EXCEPTION_NMI when injecting an exception into L1 as a nested vmexit. 
Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Fixes: e011c663b9c7 ("KVM: nVMX: Check all exceptions for intercept during delivery to L2") Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 89b98e0..04e6bbb 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2455,7 +2455,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; -- cgit v0.10.2 From 9f86f302ec0e37e84617481c587e11c47a397e3f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 5 Jul 2017 14:40:44 +0200 Subject: Linux 4.9.36 diff --git a/Makefile b/Makefile index 0a8d474..4263dca 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 35 +SUBLEVEL = 36 EXTRAVERSION = NAME = Roaring Lionus -- cgit v0.10.2