From 233c96fc077d310772375d47522fb444ff546905 Mon Sep 17 00:00:00 2001
From: Miroslav Urbanek
Date: Thu, 5 Feb 2015 16:36:50 +0100
Subject: flowcache: Fix kernel panic in flow_cache_flush_task
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

flow_cache_flush_task references the structure member flow_cache_gc_work
where it should reference flow_cache_flush_work instead. A kernel panic
occurs on kernels using IPsec during XFRM garbage collection. The garbage
collection interval can be shortened using the following sysctl settings:

  net.ipv4.xfrm4_gc_thresh=4
  net.ipv6.xfrm6_gc_thresh=4

With the default settings, our production servers crash approximately
once a week. With the settings above, they crash immediately.

Fixes: ca925cf1534e ("flowcache: Make flow cache name space aware")
Reported-by: Tomáš Charvát
Tested-by: Jan Hejl
Signed-off-by: Miroslav Urbanek
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller

diff --git a/net/core/flow.c b/net/core/flow.c
index a0348fd..1033725 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -379,7 +379,7 @@ done:
 static void flow_cache_flush_task(struct work_struct *work)
 {
     struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
-                                           flow_cache_gc_work);
+                                           flow_cache_flush_work);
     struct net *net = container_of(xfrm, struct net, xfrm);

     flow_cache_flush(net);
--
cgit v0.10.2

From c58da4c659803ac12eca5275c8a7064222adb4c7 Mon Sep 17 00:00:00 2001
From: Erik Kline
Date: Wed, 4 Feb 2015 20:01:23 +0900
Subject: net: ipv6: allow explicitly choosing optimistic addresses

RFC 4429 ("Optimistic DAD") states that optimistic addresses should be
treated as deprecated addresses. From section 2.1:

    Unless noted otherwise, components of the IPv6 protocol stack should
    treat addresses in the Optimistic state equivalently to those in the
    Deprecated state, indicating that the address is available for use
    but should not be used if another suitable address is available.

Optimistic addresses are indeed avoided when other addresses are
available (i.e. at source address selection time), but they have not
heretofore been available for things like explicit bind() and sendmsg()
with struct in6_pktinfo, etc.

This change makes optimistic addresses treated more like deprecated
addresses than tentative ones.

Signed-off-by: Erik Kline
Acked-by: Lorenzo Colitti
Acked-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
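For orientation before the diff: the new helper behaves like ipv6_chk_addr()
but lets the caller choose which address states to reject. A minimal sketch of
a caller (hypothetical function; only ipv6_chk_addr_and_flags() and the
IFA_F_* flags come from this patch):

    /* Hypothetical caller: treat optimistic addresses as usable by banning
     * only IFA_F_TENTATIVE. Adding IFA_F_OPTIMISTIC to the mask restores
     * the old, stricter behaviour (as ndisc_solicit() does further below).
     */
    static bool demo_local_addr_usable(struct net *net,
                                       const struct in6_addr *addr,
                                       const struct net_device *dev,
                                       int strict)
    {
        return ipv6_chk_addr_and_flags(net, addr, dev, strict,
                                       IFA_F_TENTATIVE) != 0;
    }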
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index d13573b..80456f7 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -62,6 +62,9 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg);

 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
                   const struct net_device *dev, int strict);
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+                            const struct net_device *dev, int strict,
+                            u32 banned_flags);

 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f7c8bbe..62900ae 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1519,15 +1519,30 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
                   const struct net_device *dev, int strict)
 {
+    return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
+}
+EXPORT_SYMBOL(ipv6_chk_addr);
+
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+                            const struct net_device *dev, int strict,
+                            u32 banned_flags)
+{
     struct inet6_ifaddr *ifp;
     unsigned int hash = inet6_addr_hash(addr);
+    u32 ifp_flags;

     rcu_read_lock_bh();
     hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
         if (!net_eq(dev_net(ifp->idev->dev), net))
             continue;
+        /* Decouple optimistic from tentative for evaluation here.
+         * Ban optimistic addresses explicitly, when required.
+         */
+        ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
+                    ? (ifp->flags&~IFA_F_TENTATIVE)
+                    : ifp->flags;
         if (ipv6_addr_equal(&ifp->addr, addr) &&
-            !(ifp->flags&IFA_F_TENTATIVE) &&
+            !(ifp_flags&banned_flags) &&
             (dev == NULL || ifp->idev->dev == dev ||
              !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
             rcu_read_unlock_bh();
@@ -1538,7 +1553,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
     rcu_read_unlock_bh();
     return 0;
 }
-EXPORT_SYMBOL(ipv6_chk_addr);
+EXPORT_SYMBOL(ipv6_chk_addr_and_flags);

 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
                                struct net_device *dev)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 6828667..113fc6c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -655,7 +655,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
     struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
     int probes = atomic_read(&neigh->probes);

-    if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1))
+    if (skb && ipv6_chk_addr_and_flags(dev_net(dev), &ipv6_hdr(skb)->saddr,
+                                       dev, 1,
+                                       IFA_F_TENTATIVE|IFA_F_OPTIMISTIC))
         saddr = &ipv6_hdr(skb)->saddr;
     probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
     if (probes < 0) {
--
cgit v0.10.2

From 7744b5f3693cc06695cb9d6667671c790282730f Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca
Date: Wed, 4 Feb 2015 23:08:50 +0100
Subject: pktgen: fix UDP checksum computation

This patch fixes two issues in UDP checksum computation in pktgen.

First, the pseudo-header uses the source and destination IP addresses;
currently, the code uses the UDP ports instead for IPv4.

Second, the UDP checksum covers both header and data. So we need to
generate the data earlier (move pktgen_finalize_skb up), and compute the
checksum over the UDP header + data.

Fixes: c26bf4a51308c ("pktgen: Add UDPCSUM flag to support UDP checksums")
Signed-off-by: Sabrina Dubroca
Acked-by: Thomas Graf
Signed-off-by: David S. Miller
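For reference, the IPv4 UDP checksum is taken over a pseudo-header plus the
UDP header and payload, which is what the corrected code below expresses via
csum_tcpudp_magic(iph->saddr, iph->daddr, datalen + 8, IPPROTO_UDP, csum).
A minimal sketch of that pseudo-header (standalone illustration per RFC 768,
not pktgen code; the __be*/__u8 types come from linux/types.h):

    /* IPv4 pseudo-header that is conceptually prepended to the UDP header
     * + data when computing udph->check. The ports are not part of it;
     * they are only covered as part of the UDP header itself.
     */
    struct demo_udp_pseudo_hdr {
        __be32 saddr;      /* iph->saddr */
        __be32 daddr;      /* iph->daddr */
        __u8   zero;       /* always 0 */
        __u8   protocol;   /* IPPROTO_UDP */
        __be16 udp_len;    /* UDP header + data, i.e. datalen + 8 */
    };

Because the payload is inside the checksummed range, pktgen_finalize_skb()
has to fill in the data before the checksum is computed, which is why the
call is moved up in both the IPv4 and IPv6 paths below.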
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index da934fc..9fa25b0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2842,25 +2842,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
     skb->dev = odev;
     skb->pkt_type = PACKET_HOST;

+    pktgen_finalize_skb(pkt_dev, skb, datalen);
+
     if (!(pkt_dev->flags & F_UDPCSUM)) {
         skb->ip_summed = CHECKSUM_NONE;
     } else if (odev->features & NETIF_F_V4_CSUM) {
         skb->ip_summed = CHECKSUM_PARTIAL;
         skb->csum = 0;
-        udp4_hwcsum(skb, udph->source, udph->dest);
+        udp4_hwcsum(skb, iph->saddr, iph->daddr);
     } else {
-        __wsum csum = udp_csum(skb);
+        __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);

         /* add protocol-dependent pseudo-header */
-        udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+        udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                         datalen + 8, IPPROTO_UDP, csum);

         if (udph->check == 0)
             udph->check = CSUM_MANGLED_0;
     }

-    pktgen_finalize_skb(pkt_dev, skb, datalen);
-
 #ifdef CONFIG_XFRM
     if (!process_ipsec(pkt_dev, skb, protocol))
         return NULL;
@@ -2976,6 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
     skb->dev = odev;
     skb->pkt_type = PACKET_HOST;

+    pktgen_finalize_skb(pkt_dev, skb, datalen);
+
     if (!(pkt_dev->flags & F_UDPCSUM)) {
         skb->ip_summed = CHECKSUM_NONE;
     } else if (odev->features & NETIF_F_V6_CSUM) {
@@ -2984,7 +2986,7 @@
         skb->csum_offset = offsetof(struct udphdr, check);
         udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
     } else {
-        __wsum csum = udp_csum(skb);
+        __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);

         /* add protocol-dependent pseudo-header */
         udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
@@ -2993,8 +2995,6 @@
         udph->check = CSUM_MANGLED_0;
     }

-    pktgen_finalize_skb(pkt_dev, skb, datalen);
-
     return skb;
 }
--
cgit v0.10.2

From e8a308affcd79d95dad111f7872e43e9f73abb3b Mon Sep 17 00:00:00 2001
From: Kiran Padwal
Date: Thu, 5 Feb 2015 17:01:37 +0530
Subject: ARCNET: Add missing error check for devm_kzalloc

This patch adds a missing check on the return value of devm_kzalloc();
without it, an OOM situation would lead to a NULL pointer dereference.

Signed-off-by: Kiran Padwal
Signed-off-by: David S. Miller

diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6c99ff0..945f532 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -78,6 +78,9 @@ static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
     priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv),
                         GFP_KERNEL);
+    if (!priv)
+        return -ENOMEM;
+
     ci = (struct com20020_pci_card_info *)id->driver_data;
     priv->ci = ci;
--
cgit v0.10.2

From 37c85c3498c5538db050ff287e346127dbc16f7c Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 5 Feb 2015 11:00:42 +0300
Subject: net: sxgbe: fix error handling in init_rx_ring()

There are a couple of bugs with the error handling in this function:

1) If we can't allocate "rx_ring->rx_skbuff" then we should call
   dma_free_coherent(), but we don't.

2) free_rx_ring() frees "rx_ring->rx_skbuff_dma" and "rx_ring->rx_skbuff",
   so calling it in a loop causes a double free.

Also, it was a bit confusing how we sometimes freed things before doing
the goto. I've cleaned it up so it does error handling in normal kernel
style.
Signed-off-by: Dan Carpenter
Signed-off-by: David S. Miller

diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index b1a2718..d860dca 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -365,6 +365,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,

     return 0;
 }
+
+/**
+ * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
+ * @dev: net device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor
+ */
+static void sxgbe_free_rx_buffers(struct net_device *dev,
+                                  struct sxgbe_rx_norm_desc *p, int i,
+                                  unsigned int dma_buf_sz,
+                                  struct sxgbe_rx_queue *rx_ring)
+{
+    struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+    kfree_skb(rx_ring->rx_skbuff[i]);
+    dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
+                     dma_buf_sz, DMA_FROM_DEVICE);
+}
+
 /**
  * init_tx_ring - init the TX descriptor ring
  * @dev: net device structure
@@ -457,7 +477,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
     /* RX ring is not allcoated */
     if (rx_ring == NULL) {
         netdev_err(dev, "No memory for RX queue\n");
-        goto error;
+        return -ENOMEM;
     }

     /* assign queue number */
@@ -469,23 +489,21 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
                                       &rx_ring->dma_rx_phy, GFP_KERNEL);

     if (rx_ring->dma_rx == NULL)
-        goto error;
+        return -ENOMEM;

     /* allocate memory for RX skbuff array */
     rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
                                            sizeof(dma_addr_t), GFP_KERNEL);
     if (!rx_ring->rx_skbuff_dma) {
-        dma_free_coherent(priv->device,
-                          rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
-                          rx_ring->dma_rx, rx_ring->dma_rx_phy);
-        goto error;
+        ret = -ENOMEM;
+        goto err_free_dma_rx;
     }

     rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
                                        sizeof(struct sk_buff *), GFP_KERNEL);
     if (!rx_ring->rx_skbuff) {
-        kfree(rx_ring->rx_skbuff_dma);
-        goto error;
+        ret = -ENOMEM;
+        goto err_free_skbuff_dma;
     }

     /* initialise the buffers */
@@ -495,7 +513,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
         ret = sxgbe_init_rx_buffers(dev, p, desc_index,
                                     bfsize, rx_ring);
         if (ret)
-            goto err_init_rx_buffers;
+            goto err_free_rx_buffers;
     }

     /* initalise counters */
@@ -505,11 +523,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,

     return 0;

-err_init_rx_buffers:
-    while (--desc_index >= 0)
-        free_rx_ring(priv->device, rx_ring, desc_index);
-error:
-    return -ENOMEM;
+err_free_rx_buffers:
+    while (--desc_index >= 0) {
+        struct sxgbe_rx_norm_desc *p;
+
+        p = rx_ring->dma_rx + desc_index;
+        sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
+    }
+    kfree(rx_ring->rx_skbuff);
+err_free_skbuff_dma:
+    kfree(rx_ring->rx_skbuff_dma);
+err_free_dma_rx:
+    dma_free_coherent(priv->device,
+                      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+                      rx_ring->dma_rx, rx_ring->dma_rx_phy);
+
+    return ret;
 }

 /**
  * free_tx_ring - free the TX descriptor ring
--
cgit v0.10.2

From 11b1f8288d4341af5d755281c871bff6c3e270dd Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Thu, 5 Feb 2015 14:39:11 +0100
Subject: ipv6: addrconf: add missing validate_link_af handler

We still need a validate_link_af() handler with an appropriate nla
policy, similar to what we have in the IPv4 case; otherwise size
validations are not being done properly there.
Fixes: f53adae4eae5 ("net: ipv6: add tokenized interface identifier support")
Fixes: bc91b0f07ada ("ipv6: addrconf: implement address generation modes")
Cc: Jiri Pirko
Signed-off-by: Daniel Borkmann
Acked-by: Jiri Pirko
Signed-off-by: David S. Miller

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 62900ae..754e683 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4587,6 +4587,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
     return 0;
 }

+static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
+    [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
+    [IFLA_INET6_TOKEN]         = { .len = sizeof(struct in6_addr) },
+};
+
+static int inet6_validate_link_af(const struct net_device *dev,
+                                  const struct nlattr *nla)
+{
+    struct nlattr *tb[IFLA_INET6_MAX + 1];
+
+    if (dev && !__in6_dev_get(dev))
+        return -EAFNOSUPPORT;
+
+    return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
+}
+
 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
 {
     int err = -EINVAL;
@@ -5408,6 +5424,7 @@ static struct rtnl_af_ops inet6_ops = {
     .family           = AF_INET6,
     .fill_link_af     = inet6_fill_link_af,
     .get_link_af_size = inet6_get_link_af_size,
+    .validate_link_af = inet6_validate_link_af,
     .set_link_af      = inet6_set_link_af,
 };
--
cgit v0.10.2

From 6e0ba47f9191511a91556b7ca2c491362680a0f3 Mon Sep 17 00:00:00 2001
From: Tobias Waldekranz
Date: Thu, 5 Feb 2015 14:52:06 +0100
Subject: dsa: do not dereference non-existing routing table

In the case where there is only one switch, no routing table will have
been allocated, so do not dereference it in this case.

Signed-off-by: Tobias Waldekranz
Signed-off-by: David S. Miller

diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 1230f52..2540ef0 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -139,7 +139,8 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
         int nexthop;

         nexthop = 0x1f;
-        if (i != ds->index && i < ds->dst->pd->nr_chips)
+        if (ds->pd->rtable &&
+            i != ds->index && i < ds->dst->pd->nr_chips)
             nexthop = ds->pd->rtable[i] & 0x1f;

         REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
--
cgit v0.10.2

From e04449fcf2cd63dec176355a028bd28b4d469be9 Mon Sep 17 00:00:00 2001
From: Tobias Waldekranz
Date: Thu, 5 Feb 2015 14:54:09 +0100
Subject: dsa: correctly determine the number of switches in a system

The number of connected switches was sourced from the number of children
of the DSA node; change it to the number of available children, skipping
any disabled switches.

Fixes: 5e95329b701c4 ("dsa: add device tree bindings to register DSA switches")
Signed-off-by: Tobias Waldekranz
Acked-by: Florian Fainelli
Signed-off-by: David S. Miller
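For context, a minimal sketch of the difference between the two OF helpers
(illustrative only; for_each_available_child_of_node() is the standard OF
iterator, the demo function is not from this patch):

    #include <linux/of.h>

    /* of_get_child_count() counts every child node of the DSA node, while
     * the _available_ variant used in the fix below skips children that
     * are not available, e.g. a switch marked status = "disabled" in the
     * device tree.
     */
    static unsigned int demo_count_enabled_switches(struct device_node *np)
    {
        struct device_node *child;
        unsigned int n = 0;

        for_each_available_child_of_node(np, child)
            n++;

        return n;
    }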
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 3731714..2173402 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -603,7 +603,7 @@ static int dsa_of_probe(struct platform_device *pdev)
     pdev->dev.platform_data = pd;
     pd->netdev = &ethernet_dev->dev;
-    pd->nr_chips = of_get_child_count(np);
+    pd->nr_chips = of_get_available_child_count(np);
     if (pd->nr_chips > DSA_MAX_SWITCHES)
         pd->nr_chips = DSA_MAX_SWITCHES;
--
cgit v0.10.2

From 364d5716a7adb91b731a35765d369602d68d2881 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Thu, 5 Feb 2015 18:44:04 +0100
Subject: rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY

ifla_vf_policy[] is wrong in advertising its individual member types as
NLA_BINARY, since .type = NLA_BINARY in combination with .len declares
the len member as the *max* attribute length, i.e. [0, len].

The issue is that when do_setvfinfo() is called to set up a VF through
the ndo handler, we could set corrupted data if the attribute length is
less than the size of the related structure itself.

The intent is exactly the opposite, namely to make sure to pass at least
data of minimum size len.

Fixes: ebc08a6f47ee ("rtnetlink: Add VF config code to rtnetlink")
Cc: Mitch Williams
Cc: Jeff Kirsher
Signed-off-by: Daniel Borkmann
Acked-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 446cbaf..5daabfd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1237,18 +1237,12 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
 };

 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
-    [IFLA_VF_MAC]        = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_mac) },
-    [IFLA_VF_VLAN]       = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_vlan) },
-    [IFLA_VF_TX_RATE]    = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_tx_rate) },
-    [IFLA_VF_SPOOFCHK]   = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_spoofchk) },
-    [IFLA_VF_RATE]       = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_rate) },
-    [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
-                             .len = sizeof(struct ifla_vf_link_state) },
+    [IFLA_VF_MAC]        = { .len = sizeof(struct ifla_vf_mac) },
+    [IFLA_VF_VLAN]       = { .len = sizeof(struct ifla_vf_vlan) },
+    [IFLA_VF_TX_RATE]    = { .len = sizeof(struct ifla_vf_tx_rate) },
+    [IFLA_VF_SPOOFCHK]   = { .len = sizeof(struct ifla_vf_spoofchk) },
+    [IFLA_VF_RATE]       = { .len = sizeof(struct ifla_vf_rate) },
+    [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
 };

 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
--
cgit v0.10.2

From 91e83133e70ebe1572746d1ad858b4eb28ab9b53 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 5 Feb 2015 14:58:14 -0800
Subject: net: use netif_rx_ni() from process context

Hotplugging a cpu might be rare, yet we have to use proper handlers when
taking over packets found in backlog queues.

dev_cpu_callback() runs from process context, thus we should call
netif_rx_ni() to properly invoke the softirq handler.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
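The rule of thumb behind the fix, as a hedged sketch (only netif_rx_ni() and
netif_rx() are real APIs here; the surrounding helper is made up):

    /* Hypothetical helper running in process context (e.g. a workqueue or
     * a CPU-hotplug notifier such as dev_cpu_callback() below): use
     * netif_rx_ni(), which makes sure the NET_RX softirq actually gets a
     * chance to run. netif_rx()/netif_rx_internal() only queue the packet
     * and raise the softirq, which is sufficient in IRQ/softirq context
     * where softirqs run on the exit path, but not here.
     */
    static void demo_reinject_from_process_context(struct sk_buff *skb)
    {
        netif_rx_ni(skb);
    }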
diff --git a/net/core/dev.c b/net/core/dev.c
index 7fe8292..6c1556a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7064,11 +7064,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,

     /* Process offline CPU's input_pkt_queue */
     while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-        netif_rx_internal(skb);
+        netif_rx_ni(skb);
         input_queue_head_incr(oldsd);
     }
     while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
-        netif_rx_internal(skb);
+        netif_rx_ni(skb);
         input_queue_head_incr(oldsd);
     }
--
cgit v0.10.2

From a4870f79c228d109c1e51df4a899394515271604 Mon Sep 17 00:00:00 2001
From: Rasmus Villemoes
Date: Sat, 7 Feb 2015 03:17:31 +0100
Subject: vxlan: Wrong type passed to %pIS

src_ip is a pointer to a union vxlan_addr, one member of which is a
struct sockaddr. Passing a pointer to src_ip is wrong; one should pass
the value of src_ip itself. Since %pIS formally expects something of
type struct sockaddr *, let's pass a pointer to the appropriate union
member, though this of course doesn't change the generated code.

Fixes: e4c7ed415387 ("vxlan: add ipv6 support")
Signed-off-by: Rasmus Villemoes
Acked-by: Cong Wang
Signed-off-by: David S. Miller
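The mismatch is easier to see in isolation. A reduced sketch (hypothetical
union and function; %pIS, netdev_info() and struct sockaddr are the real
kernel interfaces):

    #include <linux/netdevice.h>
    #include <linux/socket.h>

    union demo_addr {
        struct sockaddr sa;    /* what %pIS wants to look at */
        /* ... address-family specific members ... */
    };

    static void demo_report(struct net_device *dev, const union demo_addr *ip)
    {
        /* Wrong: passing &ip hands %pIS a pointer to the *pointer*, so the
         * pointer value itself gets interpreted as a sockaddr.
         * Formally off: passing ip (a union pointer) yields the right
         * address but the wrong declared type.
         * Right: pass the address of the sockaddr member explicitly.
         */
        netdev_info(dev, "peer address: %pIS\n", &ip->sa);
    }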
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8c755d..11defbb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -991,7 +991,7 @@ static bool vxlan_snoop(struct net_device *dev,
         if (net_ratelimit())
             netdev_info(dev,
                         "%pM migrated from %pIS to %pIS\n",
-                        src_mac, &rdst->remote_ip, &src_ip);
+                        src_mac, &rdst->remote_ip.sa, &src_ip->sa);

         rdst->remote_ip = *src_ip;
         f->updated = jiffies;
--
cgit v0.10.2

From b750f5b4273316b4bb4d0a4a474c1eeaf0833648 Mon Sep 17 00:00:00 2001
From: Andrew Lunn
Date: Mon, 9 Feb 2015 02:29:55 +0100
Subject: net: dsa: Remove redundant phy_attach()

dsa_slave_phy_setup() finds the phy for the port via device tree and
of_phy_connect(), or it falls back to taking a phy from the switch's
internal mdio bus and calling phy_connect_direct(). Either way, if a phy
is found, phy_attach_direct() is called to attach the phy to the slave
device.

In dsa_slave_create(), a second call to phy_attach() is made. This
results in the warning "PHY already attached". Remove this second,
redundant attaching of the phy.

Signed-off-by: Andrew Lunn
Acked-by: Florian Fainelli
Tested-by: Florian Fainelli
Signed-off-by: David S. Miller

diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 589aafd..d104ae1 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -676,18 +676,5 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,

     netif_carrier_off(slave_dev);

-    if (p->phy != NULL) {
-        if (ds->drv->get_phy_flags)
-            p->phy->dev_flags |= ds->drv->get_phy_flags(ds, port);
-
-        phy_attach(slave_dev, dev_name(&p->phy->dev),
-                   PHY_INTERFACE_MODE_GMII);
-
-        p->phy->autoneg = AUTONEG_ENABLE;
-        p->phy->speed = 0;
-        p->phy->duplex = 0;
-        p->phy->advertising = p->phy->supported | ADVERTISED_Autoneg;
-    }
-
     return slave_dev;
 }
--
cgit v0.10.2

From 25d3b493a52d4ece811ba07881558fc7f6778fb8 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 9 Feb 2015 20:16:17 +0900
Subject: bridge: Fix inability to add non-vlan fdb entry

Bridge's default_pvid adds a vid by default, by which we cannot add a
non-vlan fdb entry by default, because br_fdb_add() adds fdb entries for
all vlans instead of a non-vlan one when any vlan is configured.

  # ip link add br0 type bridge
  # ip link set eth0 master br0
  # bridge fdb add 12:34:56:78:90:ab dev eth0 master temp
  # bridge fdb show brport eth0 | grep 12:34:56:78:90:ab
  12:34:56:78:90:ab dev eth0 vlan 1 static

We expect a non-vlan fdb entry as well as vlan 1:

  12:34:56:78:90:ab dev eth0 static

To fix this, we need to insert a non-vlan fdb entry if a vlan is not
specified, even when any vlan is configured.

Fixes: 5be5a2df40f0 ("bridge: Add filtering support for default_pvid")
Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller

diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cc36e59..c041f99 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -840,10 +840,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
         /* VID was specified, so use it. */
         err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
     } else {
-        if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
-            err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+        err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+        if (err || !pv)
             goto out;
-        }

         /* We have vlans configured on this port and user didn't
          * specify a VLAN. To be nice, add/update entry for every
@@ -911,16 +910,15 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],

         err = __br_fdb_delete(p, addr, vid);
     } else {
-        if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
-            err = __br_fdb_delete(p, addr, 0);
+        err = -ENOENT;
+        err &= __br_fdb_delete(p, addr, 0);
+        if (!pv)
             goto out;
-        }

         /* We have vlans configured on this port and user didn't
          * specify a VLAN. To be nice, add/update entry for every
          * vlan on this port.
          */
-        err = -ENOENT;
         for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
             err &= __br_fdb_delete(p, addr, vid);
         }
--
cgit v0.10.2

From 51f30770e50eb787200f30a79105e2615b379334 Mon Sep 17 00:00:00 2001
From: Vlad Yasevich
Date: Mon, 9 Feb 2015 09:38:20 -0500
Subject: ipv6: Fix fragment id assignment on LE arches.

Recent commit:

    0508c07f5e0c94f38afd5434e8b2a55b84553077
    Author: Vlad Yasevich
    Date:   Tue Feb 3 16:36:15 2015 -0500

        ipv6: Select fragment id during UFO segmentation if not set.

introduced a bug on LE in how the ipv6 fragment id is assigned. This was
caught by the nightly sparse check.

Resolve the following sparse error:

    net/ipv6/output_core.c:57:38: sparse: incorrect type in assignment (different base types)
    net/ipv6/output_core.c:57:38:    expected restricted __be32 [usertype] ip6_frag_id
    net/ipv6/output_core.c:57:38:    got unsigned int [unsigned] [assigned] [usertype] id

Fixes: 0508c07f5e0c9 ("ipv6: Select fragment id during UFO segmentation if not set.")
Signed-off-by: Vladislav Yasevich
Signed-off-by: David S. Miller

diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 54520a0..a86cf60 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -54,7 +54,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
     id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
                              &addrs[1], &addrs[0]);
-    skb_shinfo(skb)->ip6_frag_id = id;
+    skb_shinfo(skb)->ip6_frag_id = htonl(id);
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
--
cgit v0.10.2

From 8381eacf5c3b35cf7755f4bc521c4d56d24c1cd9 Mon Sep 17 00:00:00 2001
From: Vlad Yasevich
Date: Mon, 9 Feb 2015 09:38:21 -0500
Subject: ipv6: Make __ipv6_select_ident static

Make __ipv6_select_ident() static as it isn't used outside the file.

Fixes: 0508c07f5e0c9 ("ipv6: Select fragment id during UFO segmentation if not set.")
Signed-off-by: Vladislav Yasevich
Signed-off-by: David S. Miller
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6e416f6..fde3b59 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,8 +671,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
     return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }

-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
-                        struct in6_addr *src);
 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 void ipv6_proxy_select_ident(struct sk_buff *skb);

diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index a86cf60..74581f7 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,7 +9,8 @@
 #include
 #include

-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+                               struct in6_addr *src)
 {
     u32 hash, id;
--
cgit v0.10.2

From 531c94a9681b8c253fd0490a4ca8bbe01a38c78b Mon Sep 17 00:00:00 2001
From: Yuchung Cheng
Date: Mon, 9 Feb 2015 12:35:23 -0800
Subject: tcp: don't include Fast Open option in SYN-ACK on pure SYN-data

If a server has enabled Fast Open and it receives a pure SYN-data packet
(without a Fast Open option), it won't accept the data but it incorrectly
returns a SYN-ACK with a Fast Open cookie and also increments the SNMP
stat LINUX_MIB_TCPFASTOPENPASSIVEFAIL.

This patch makes the server include a Fast Open cookie in SYN-ACK only if
the SYN has some Fast Open option (i.e., when client requests or presents
a cookie).

Signed-off-by: Yuchung Cheng
Acked-by: Neal Cardwell
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller

diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 815c85e..53db2c3 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -255,6 +255,9 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
     struct tcp_fastopen_cookie valid_foc = { .len = -1 };
     bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;

+    if (foc->len == 0) /* Client requests a cookie */
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+
     if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
           (syn_data || foc->len >= 0) &&
           tcp_fastopen_queue_check(sk))) {
@@ -265,7 +268,8 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
     if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
         goto fastopen;

-    if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+    if (foc->len >= 0 && /* Client presents or requests a cookie */
+        tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
         foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
         foc->len == valid_foc.len &&
         !memcmp(foc->val, valid_foc.val, foc->len)) {
@@ -284,11 +288,10 @@ fastopen:
                          LINUX_MIB_TCPFASTOPENPASSIVE);
             return true;
         }
-    }
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+    } else if (foc->len > 0) /* Client presents an invalid cookie */
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

-    NET_INC_STATS_BH(sock_net(sk), foc->len ?
-                     LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
-                     LINUX_MIB_TCPFASTOPENCOOKIEREQD);
     *foc = valid_foc;
     return false;
 }
--
cgit v0.10.2
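Read together with the hunks above, the server-side behaviour after this
patch can be summarized as follows (a simplified sketch, not the real
tcp_try_fastopen(); the foc->len convention -- negative means no Fast Open
option, zero means a cookie request, positive means a cookie was presented --
is taken from the diff):

    enum demo_tfo_reply { DEMO_TFO_NONE, DEMO_TFO_COOKIE, DEMO_TFO_ACCEPT };

    /* foc_len:   length field of the received Fast Open option
     * cookie_ok: the presented cookie matched the one we would generate
     * syn_data:  the SYN carried payload data
     */
    static enum demo_tfo_reply demo_tfo_decide(int foc_len, bool cookie_ok,
                                               bool syn_data)
    {
        if (foc_len < 0)
            return DEMO_TFO_NONE;    /* pure SYN/SYN-data: no cookie echoed */
        if (foc_len > 0 && cookie_ok && syn_data)
            return DEMO_TFO_ACCEPT;  /* valid cookie plus data: fast open */
        return DEMO_TFO_COOKIE;      /* cookie request, or invalid cookie:
                                      * reply with a valid cookie */
    }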