author     Linus Torvalds <torvalds@linux-foundation.org>  2016-04-21 19:57:34 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-04-21 19:57:34 (GMT)
commit     c5edde3a81149d29ceae4221f09f4c7bc2f70846 (patch)
tree       5260db3beec59da2a2276231d083e8e2b63b8c2a /drivers
parent     f862d66a1a4609e61cd87c7ff4c7d9a234103d67 (diff)
parent     b4f70527f052b0c00be4d7cac562baa75b212df5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix memory leak in iwlwifi, from Matti Gottlieb.
2) Add missing registration of netfilter arp_tables into initial
namespace, from Florian Westphal.
3) Fix potential NULL deref in DECnet routing code.
4) Restrict NETLINK_URELEASE to truly bound sockets only, from Dmitry
Ivanov.
5) Fix dst ref counting in VRF, from David Ahern.
6) Fix TSO segmenting limits in i40e driver, from Alexander Duyck.
7) Fix heap leak in PACKET_DIAG_MCLIST, from Mathias Krause.
8) Revalidate IPv6 datagram socket cached routes properly, particularly
   with UDP, from Martin KaFai Lau.
9) Fix endian bug in RDS dp_ack_seq handling, from Qing Huang.
10) Fix stats typing in bcmgenet driver, from Eric Dumazet.
11) Openvswitch needs to orphan SKBs before IPv6 fragmentation handling,
    from Joe Stringer.
12) SPI device reference leak in spi_ks8895 PHY driver, from Mark Brown.
13) atl2 doesn't actually support scatter-gather, so don't advertise the
    feature. From Ben Hutchings.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits)
openvswitch: use flow protocol when recalculating ipv6 checksums
Driver: Vmxnet3: set CHECKSUM_UNNECESSARY for IPv6 packets
atl2: Disable unimplemented scatter/gather feature
net/mlx4_en: Split SW RX dropped counter per RX ring
net/mlx4_core: Don't allow to VF change global pause settings
net/mlx4_core: Avoid repeated calls to pci enable/disable
net/mlx4_core: Implement pci_resume callback
net: phy: spi_ks8895: Don't leak references to SPI devices
net: ethernet: davinci_emac: Fix platform_data overwrite
net: ethernet: davinci_emac: Fix Unbalanced pm_runtime_enable
qede: Fix single MTU sized packet from firmware GRO flow
qede: Fix setting Skb network header
qede: Fix various memory allocation error flows for fastpath
tcp: Merge tx_flags and tskey in tcp_shifted_skb
tcp: Merge tx_flags and tskey in tcp_collapse_retrans
drivers: net: cpsw: fix wrong regs access in cpsw_ndo_open
tcp: Fix SOF_TIMESTAMPING_TX_ACK when handling dup acks
openvswitch: Orphan skbs before IPv6 defrag
Revert "Prevent NUll pointer dereference with two PHYs on cpsw"
VSOCK: Only check error on skb_recv_datagram when skb is NULL
...
Diffstat (limited to 'drivers')
40 files changed, 421 insertions, 389 deletions
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8f..1f63547 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
 	return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
 						     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
 	struct of_phandle_args out_irq;
 	int ret;
 
-	if (!parent || !parent->dev.of_node)
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
 		return 0;
 
 	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
 	struct device_node *node;
 
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return;
+
 	node = bcma_of_find_child_device(parent, core);
 	if (node)
 		core->dev.of_node = node;
 
 	core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-				struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-					   struct bcma_device *core, int num)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
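The bcma hunks above trade #ifdef'd stub functions for IS_ENABLED() tests, which keep a single copy of each function and let the compiler discard the disabled paths. A minimal sketch of the idiom, with an illustrative function that is not part of the patch:

#include <linux/kconfig.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

/* IS_ENABLED(CONFIG_OF_IRQ) is a compile-time constant 1 or 0, so the
 * whole body folds away when the option is off, yet the code is still
 * parsed and type-checked in every configuration (unlike an #ifdef).
 */
static unsigned int example_get_irq(struct platform_device *parent, int num)
{
	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
		return 0;

	return irq_of_parse_and_map(parent->dev.of_node, num);
}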
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a..99e5f97 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;
 
+	if (addr_len < sizeof(struct sockaddr_mISDN))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	if (_pms(sk)->dev) {
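The two added lines close a classic sockaddr validation gap: the socket layer only guarantees that addr_len bytes of the address are valid, so family-specific fields must not be read from a shorter buffer. A userspace-flavoured sketch of the same check (AF_EXAMPLE and the struct are hypothetical stand-ins):

#include <errno.h>
#include <sys/socket.h>

#define AF_EXAMPLE 27	/* stand-in for AF_ISDN */

struct sockaddr_example {
	sa_family_t sa_family;
	unsigned char channel;	/* family-specific payload */
};

static int example_bind_check(const struct sockaddr *addr, socklen_t addr_len)
{
	const struct sockaddr_example *maddr;

	/* reject short addresses before reading any family-specific field */
	if (!addr || addr_len < sizeof(struct sockaddr_example))
		return -EINVAL;

	maddr = (const struct sockaddr_example *)addr;
	if (maddr->sa_family != AF_EXAMPLE)
		return -EINVAL;

	return 0;
}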
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b..a24c18e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -195,6 +195,7 @@ config GENEVE
 
 config MACSEC
 	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_GCM
 	---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be..a290402 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 			       struct net_device *bridge)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u16 fid;
 	int i, err;
 
 	mutex_lock(&ps->smi_mutex);
 
-	/* Get or create the bridge FID and assign it to the port */
-	for (i = 0; i < ps->num_ports; ++i)
-		if (ps->ports[i].bridge_dev == bridge)
-			break;
-
-	if (i < ps->num_ports)
-		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-	else
-		err = _mv88e6xxx_fid_new(ds, &fid);
-	if (err)
-		goto unlock;
-
-	err = _mv88e6xxx_port_fid_set(ds, port, fid);
-	if (err)
-		goto unlock;
-
 	/* Assign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = bridge;
 
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 		}
 	}
 
-unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct net_device *bridge = ps->ports[port].bridge_dev;
-	u16 fid;
 	int i;
 
 	mutex_lock(&ps->smi_mutex);
 
-	/* Give the port a fresh Filtering Information Database */
-	if (_mv88e6xxx_fid_new(ds, &fid) ||
-	    _mv88e6xxx_port_fid_set(ds, port, fid))
-		netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
 	/* Unassign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = NULL;
 
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	 * the other bits clear.
 	 */
 	reg = 1 << port;
-	/* Disable learning for DSA and CPU ports */
-	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+	/* Disable learning for CPU port */
+	if (dsa_is_cpu_port(ds, port))
+		reg = 0;
 
 	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
 	if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	if (ret)
 		goto abort;
 
-	/* Port based VLAN map: give each port its own address
+	/* Port based VLAN map: give each port the same default address
 	 * database, and allow bidirectional communication between the
 	 * CPU and DSA port(s), and the other ports.
 	 */
-	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+	ret = _mv88e6xxx_port_fid_set(ds, port, 0);
 	if (ret)
 		goto abort;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f45..2ff4658 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
 	/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a9..38db2e4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
 	}
 
+	/* This (reset &) enable is not preset in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
 	/* Allocation and references */
 	net_dev = alloc_etherdev(sizeof(*bgmac));
 	if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093..9a03c14 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
 #define BGMAC_CMDCFG_TAI			0x00000200
 #define BGMAC_CMDCFG_HD				0x00000400	/* Set if in half duplex mode */
 #define BGMAC_CMDCFG_HD_SHIFT			10
-#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define BGMAC_CMDCFG_ML				0x00008000	/* Set to activate mac loopback mode */
 #define BGMAC_CMDCFG_AE				0x00400000
 #define BGMAC_CMDCFG_CFE			0x00800000
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d..44ad149 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
 		else
 			p = (char *)priv;
 		p += s->stat_offset;
-		data[i] = *(u32 *)p;
+		if (sizeof(unsigned long) != sizeof(u32) &&
+		    s->stat_sizeof == sizeof(unsigned long))
+			data[i] = *(unsigned long *)p;
+		else
+			data[i] = *(u32 *)p;
 	}
 }
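The bcmgenet hunk fixes stats that are declared unsigned long but were read through a u32 pointer, truncating (or, on big-endian 64-bit, garbling) the value. A standalone sketch of the width-aware read, with illustrative types rather than the driver's:

#include <stdint.h>
#include <string.h>

struct stat_desc {
	size_t stat_sizeof;	/* size of the field at stat_offset */
	size_t stat_offset;	/* byte offset inside the stats struct */
};

static uint64_t read_stat(const void *base, const struct stat_desc *s)
{
	const char *p = (const char *)base + s->stat_offset;

	if (sizeof(unsigned long) != sizeof(uint32_t) &&
	    s->stat_sizeof == sizeof(unsigned long)) {
		unsigned long v;

		memcpy(&v, p, sizeof(v));	/* full-width read on LP64 */
		return v;
	} else {
		uint32_t v;

		memcpy(&v, p, sizeof(v));	/* 32-bit read */
		return v;
	}
}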
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9679515..d20539a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
 		}
 
 		lmac++;
-		if (lmac == MAX_LMAC_PER_BGX)
+		if (lmac == MAX_LMAC_PER_BGX) {
+			of_node_put(node);
 			break;
+		}
 	}
-	of_node_put(node);
 	return 0;
 
 defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc..326d400 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
 	       unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144b..6278e5a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
 	int i;
-	struct sge_eth_rxq *eq = adap->sge.ethrxq;
-	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_eth_rxq *eq;
+	struct sge_eth_txq *etq;
+
+	/* stop all Rx queues in order to start them draining */
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
+		if (eq->rspq.desc)
+			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+				   FW_IQ_TYPE_FL_INT_CAP,
+				   eq->rspq.cntxt_id,
+				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+				   0xffff);
+	}
 
 	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
 		if (eq->rspq.desc)
 			free_rspq_fl(adap, &eq->rspq,
 				     eq->fl.size ? &eq->fl : NULL);
+
+		etq = &adap->sge.ethtxq[i];
 		if (etq->q.desc) {
 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
 				       etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c7efb11..71586a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6950,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
+ *	t4_iq_stop - stop an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Stops an ingress queue and its associated FLs, if any.  This causes
+ *	any current or future data/messages destined for these queues to be
+ *	tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc..8cf943d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
 		if (err)
 			return err;
 
-		/* verify upper 16 bits are zero */
-		if (vid >> 16)
-			return FM10K_ERR_PARAM;
-
 		set = !(vid & FM10K_VLAN_CLEAR);
 		vid &= ~FM10K_VLAN_CLEAR;
 
-		err = fm10k_iov_select_vid(vf_info, (u16)vid);
-		if (err < 0)
-			return err;
+		/* if the length field has been set, this is a multi-bit
+		 * update request. For multi-bit requests, simply disallow
+		 * them when the pf_vid has been set. In this case, the PF
+		 * should have already cleared the VLAN_TABLE, and if we
+		 * allowed them, it could allow a rogue VF to receive traffic
+		 * on a VLAN it was not assigned. In the single-bit case, we
+		 * need to modify requests for VLAN 0 to use the default PF or
+		 * SW vid when assigned.
+		 */
-		vid = err;
+		if (vid >> 16) {
+			/* prevent multi-bit requests when PF has
+			 * administratively set the VLAN for this VF
+			 */
+			if (vf_info->pf_vid)
+				return FM10K_ERR_PARAM;
+		} else {
+			err = fm10k_iov_select_vid(vf_info, (u16)vid);
+			if (err < 0)
+				return err;
+
+			vid = err;
+		}
 
 		/* update VSI info for VF in regards to VLAN table */
 		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab..6a49b7a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
 **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
-
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
+	int nr_frags, sum;
 
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc0..a9bd705 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40e_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40e_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
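For reference, the linearization test the i40e hunks implement can be written standalone as below (illustrative names; fragment sizes in bytes). The idea: with 8 DMA descriptors per wire packet, and 2 already taken by the TSO header and the previous fragment, every window of 6 consecutive fragments must contribute at least gso_size bytes, or the skb must be linearized:

#include <stdbool.h>
#include <stddef.h>

#define MAX_BUFFER_TXD 8	/* stand-in for I40E_MAX_BUFFER_TXD */

static bool needs_linearize(const size_t *frag_len, int nr_frags,
			    size_t gso_size)
{
	int head, tail, budget;
	long sum;

	/* 7 fragments or fewer always fit in 8 DMA buffers */
	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;

	/* the last 6 fragments never inherit data from ones after them */
	budget = nr_frags - (MAX_BUFFER_TXD - 2);

	/* seed the window with fragments 0 through 4 */
	sum = 1 - (long)gso_size;
	for (head = 0; head < 5; head++)
		sum += (long)frag_len[head];

	/* slide the 6-fragment window across the remaining fragments */
	for (tail = 0; ; tail++) {
		sum += (long)frag_len[head++];
		if (sum < 0)
			return true;	/* a window fell short of gso_size */
		if (!--budget)
			break;
		sum -= (long)frag_len[tail];
	}

	return false;
}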
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c..cea97da 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
 **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
-
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
+	int nr_frags, sum;
 
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5..0429553 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40evf_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a..c761194 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 	case ETH_SS_STATS:
 		return bitmap_iterator_count(&it) +
 			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 2);
+			(priv->rx_ring_num * 3);
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i]->packets;
 		data[index++] = priv->rx_ring[i]->bytes;
+		data[index++] = priv->rx_ring[i]->dropped;
 	}
 	spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
 				"rx%d_packets", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_bytes", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_dropped", i);
 		}
 		break;
 	case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5f..20b6c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	u64 in_mod = reset << 8 | port;
 	int err;
 	int i, counter_index;
+	unsigned long sw_rx_dropped = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		stats->rx_packets += priv->rx_ring[i]->packets;
 		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		sw_rx_dropped += priv->rx_ring[i]->dropped;
 		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
 		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 			  &mlx4_en_stats->MCAST_prio_1,
 			  NUM_PRIORITIES);
 	stats->collisions = 0;
-	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
 	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe5..b723e3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		gfp_t gfp = _gfp;
 
 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
 		page = alloc_pages(gfp, order);
 		if (likely(page))
 			break;
@@ -126,7 +126,9 @@ out:
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				       page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
-			set_page_count(page, 1);
+			/* Revert changes done by mlx4_alloc_pages */
+			page_ref_sub(page, page_alloc[i].page_size /
+					   priv->frag_info[i].frag_stride - 1);
 			put_page(page);
 		}
 	}
@@ -176,7 +178,9 @@ out:
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+				   priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 		page_alloc->page = NULL;
 	}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* GRO not possible, complete processing here */
 		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
 		if (!skb) {
-			priv->stats.rx_dropped++;
+			ring->dropped++;
 			goto next;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f723..12c77a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
 	return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
 			 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-	err = pci_enable_device(pdev);
+	err = mlx4_pci_enable_device(&priv->dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
 	pci_release_regions(pdev);
 
 err_disable_pdev:
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(&priv->dev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	priv->pci_dev_data = id->driver_data;
 	mutex_init(&dev->persist->device_state_mutex);
 	mutex_init(&dev->persist->interface_state_mutex);
+	mutex_init(&dev->persist->pci_status_mutex);
 
 	ret = devlink_register(devlink, &pdev->dev);
 	if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	}
 
 	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(dev);
 	devlink_unregister(devlink);
 	kfree(dev->persist);
 	devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(persist->dev);
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
 	struct mlx4_dev	 *dev  = persist->dev;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int               ret;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int total_vfs;
+	int err;
 
 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+	err = mlx4_pci_enable_device(dev);
+	if (err) {
+		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+	struct mlx4_dev	 *dev  = persist->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int total_vfs;
+	int err;
+
+	mlx4_err(dev, "%s was called\n", __func__);
 	total_vfs = dev->persist->num_vfs;
 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
 	mutex_lock(&persist->interface_state_mutex);
 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
 				    priv, 1);
-		if (ret) {
-			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-				 __func__, ret);
+		if (err) {
+			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+				 __func__, err);
 			goto end;
 		}
 
-		ret = restore_current_port_types(dev, dev->persist->
+		err = restore_current_port_types(dev, dev->persist->
 						 curr_port_type, dev->persist->
 						 curr_port_poss_type);
-		if (ret)
-			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+		if (err)
+			mlx4_err(dev, "could not restore original port types (%d)\n", err);
 	}
 end:
 	mutex_unlock(&persist->interface_state_mutex);
-
-	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
 	.error_detected = mlx4_pci_err_detected,
 	.slot_reset     = mlx4_pci_slot_reset,
+	.resume         = mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef96831..c9d7fc51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
 	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
 	int			init_port_ref[MLX4_MAX_PORTS + 1];
 	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	u8			pptx;
+	u8			pprx;
 	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
 	struct mlx4_resource_tracker res_tracker;
 	struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a..63b1aea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	unsigned long csum_complete;
+	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c650..087b23b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		}
 
 		gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+		/* Slave cannot change Global Pause configuration */
+		if (slave != mlx4_master_func_num(dev) &&
+		    ((gen_context->pptx != master->pptx) ||
+		     (gen_context->pprx != master->pprx))) {
+			gen_context->pptx = master->pptx;
+			gen_context->pprx = master->pprx;
+			mlx4_warn(dev,
+				  "denying Global Pause change for slave:%d\n",
+				  slave);
+		} else {
+			master->pptx = gen_context->pptx;
+			master->pprx = gen_context->pprx;
+		}
 		break;
 	case MLX4_SET_PORT_GID_TABLE:
 		/* change to MULTIPLE entries: number of guest's gids
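The mlx4_pci_enable_device()/mlx4_pci_disable_device() pair above is a general pattern: serialize enable/disable behind a mutex plus an explicit status flag so that probe, remove and AER error-recovery paths can never enable or disable the device twice. A driver-agnostic sketch of the same shape (all names are stand-ins, the low-level calls are stubbed):

#include <pthread.h>
#include <stdbool.h>

struct dev_state {
	pthread_mutex_t status_lock;
	bool enabled;
};

/* stand-ins for pci_enable_device() / pci_disable_device() */
static int low_level_enable(void) { return 0; }
static void low_level_disable(void) { }

static int dev_enable(struct dev_state *d)
{
	int err = 0;

	pthread_mutex_lock(&d->status_lock);
	if (!d->enabled) {		/* idempotent: second call is a no-op */
		err = low_level_enable();
		if (!err)
			d->enabled = true;
	}
	pthread_mutex_unlock(&d->status_lock);
	return err;
}

static void dev_disable(struct dev_state *d)
{
	pthread_mutex_lock(&d->status_lock);
	if (d->enabled) {		/* never disable twice */
		low_level_disable();
		d->enabled = false;
	}
	pthread_mutex_unlock(&d->status_lock);
}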
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af32..7869465 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+				    struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}
 
 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 	return 0;
 
 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
 	return -ENOMEM;
 }
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
 	tpa_info->skb = netdev_alloc_skb(edev->ndev,
 					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
+		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
 		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-		return;
+		goto cons_buf;
 	}
 
 	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
 	/* This is needed in order to enable forwarding support */
 	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
 	if (likely(cqe->ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct iphdr));
 	th = tcp_hdr(skb);
 
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 	th = tcp_hdr(skb);
 
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
 			     struct sk_buff *skb,
 			     u16 vlan_tag)
 {
+	/* FW can send a single MTU sized packet from gro flow
+	 * due to aggregation timeout/last segment etc. which
+	 * is not expected to be a gro packet. If a skb has zero
+	 * frags then simply push it in the stack as non gso skb.
+	 */
+	if (unlikely(!skb->data_len)) {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+		goto send_skb;
+	}
+
 #ifdef CONFIG_INET
 	if (skb_shinfo(skb)->gso_size) {
+		skb_set_network_header(skb, 0);
+
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
 		}
 	}
 #endif
+
+send_skb:
 	skb_record_rx_queue(skb, fp->rss_id);
 	qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
 			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
 		}
 
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
 				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
 		}
 
 		/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
 							    sw_rx_data))) {
 				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
 				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
 				goto next_cqe;
 			}
 		}
 
+		qede_rx_bd_ring_consume(rxq);
+
 		if (fp_cqe->bd_num != 1) {
 			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
 			u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			     num_frags--) {
 				u16 cur_size = pkt_len > rxq->rx_buf_size ?
 						rxq->rx_buf_size : pkt_len;
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}
 
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
-
-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
 					goto next_cqe;
+				}
 
-				rxq->sw_rx_cons++;
 				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
 				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);
+
 				dma_unmap_page(&edev->pdev->dev,
 					       sw_rx_data->mapping,
 					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				pkt_len -= cur_size;
 			}
 
-			if (pkt_len)
+			if (unlikely(pkt_len))
 				DP_ERR(edev,
 				       "Mapped all BDs of jumbo, but still have %d bytes\n",
 				       pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		skb_record_rx_queue(skb, fp->rss_id);
 
 		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
 next_rx_only:
 		rx_pkt++;
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-		if (replace_buf) {
+		if (replace_buf->data) {
 			dma_unmap_page(&edev->pdev->dev,
 				       dma_unmap_addr(replace_buf, mapping),
 				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	/* Allocate buffers for the Rx ring */
 	for (i = 0; i < rxq->num_rx_buffers; i++) {
 		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
-		goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
+			goto err;
+		}
 	}
 
-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
-
+	rc = qede_alloc_sge_mem(edev, rxq);
 err:
-	qede_free_mem_rxq(edev, rxq);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
 	}
 
 	return 0;
-
 err:
-	qede_free_mem_fp(edev, fp);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 		struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
 		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
+		if (rc) {
 			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
 		}
-		edev->num_rss = rss_id;
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a..9e2a0bd 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
 	rate = clk_get_rate(clk);
 	clk_put(clk);
 
+	if (!rate)
+		return -EINVAL;
+
 	inc = 1000000000ULL << 20;
 	do_div(inc, rate);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7..ceea74c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 				   __func__);
 			return ret;
 		}
-		ret = sh_eth_dev_init(ndev, false);
+		ret = sh_eth_dev_init(ndev, true);
 		if (ret < 0) {
 			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
 				   __func__);
 			return ret;
 		}
 
-		mdp->irq_enabled = true;
-		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-		/* Setting the Rx mode will start the Rx process. */
-		sh_eth_write(ndev, EDRRR_R, EDRRR);
 		netif_device_attach(ndev);
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797a..44022b1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG 0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
 	int phymode = dwmac->interface;
 	u32 reg_offset = dwmac->reg_offset;
 	u32 reg_shift = dwmac->reg_shift;
-	u32 ctrl, val;
+	u32 ctrl, val, module;
 
 	switch (phymode) {
 	case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
 	ctrl |= val << reg_shift;
 
-	if (dwmac->f2h_ptp_ref_clk)
+	if (dwmac->f2h_ptp_ref_clk) {
 		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-	else
+		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			    &module);
+		module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+		regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			     module);
+	} else {
 		ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+	}
 
 	regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4..bbb77cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	int i, ret;
 	u32 reg;
 
+	pm_runtime_get_sync(&priv->pdev->dev);
+
 	if (!cpsw_common_res_usage_state(priv))
 		cpsw_intr_disable(priv);
 	netif_carrier_off(ndev);
 
-	pm_runtime_get_sync(&priv->pdev->dev);
-
 	reg = priv->version;
 
 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abed..58d58f0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 			pdata->hw_ram_addr = auxdata->hw_ram_addr;
 	}
 
-	pdev->dev.platform_data = pdata;
-
 	return pdata;
 }
 
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
 	cpdma_ctlr_destroy(priv->dma);
 
 	unregister_netdev(ndev);
+	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 
 	return 0;
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d4..93ffedf 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
 		return -ENOMEM;
 
 	mutex_init(&ks->lock);
-	ks->spi = spi_dev_get(spi);
+	ks->spi = spi;
 	ks->chip = &ks8995_chip[variant];
 
 	if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d9..96a5028 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
-	/* Huawei E3372 fails unless NDP comes after the IP packets */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+	 * (12d1:157d), are known to fail unless the NDP is placed
+	 * after the IP packets.  Applying the quirk to all Huawei
+	 * devices is broader than necessary, but harmless.
+	 */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
 	},
 
 	/* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f6..db8022a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 		union Vmxnet3_GenericDesc *gdesc)
 {
 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-							VMXNET3_RCD_CSUM_OK) {
+		if (gdesc->rcd.v4 &&
+		    (le32_to_cpu(gdesc->dword[3]) &
+		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+			BUG_ON(gdesc->rcd.frg);
+		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
 			BUG_ON(gdesc->rcd.frg);
 		} else {
 			if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344..c482539 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
 * Version numbers
 */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb..8a8f1e5 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
 	struct u64_stats_sync	syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-	/* TO-DO: return max ethernet size? */
-	return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-	/* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-	return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-	.family		= AF_INET,
-	.local_out	= vrf_ip_local_out,
-	.check		= vrf_ip_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-	.family		= AF_INET6,
-	.local_out	= ip6_local_out,
-	.check		= vrf_ip6_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-						     sizeof(struct rt6_info),
-						     0,
-						     SLAB_HWCACHE_ALIGN,
-						     NULL);
-
-	if (!vrf_dst_ops6.kmem_cachep)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-	skb->dev->stats.rx_errors++;
-	kfree_skb(skb);
-	return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
 			      struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-	dst_destroy(&vrf->rt6->dst);
-	free_percpu(vrf->rt6->rt6i_pcpu);
+	dst_release(&vrf->rt6->dst);
 	vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct dst_entry *dst;
+	struct net *net = dev_net(dev);
 	struct rt6_info *rt6;
-	int cpu;
 	int rc = -ENOMEM;
 
-	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rt6 = ip6_dst_alloc(net, dev,
+			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
 	if (!rt6)
 		goto out;
 
-	dst = &rt6->dst;
-
-	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-	if (!rt6->rt6i_pcpu) {
-		dst_destroy(dst);
-		goto out;
-	}
-	for_each_possible_cpu(cpu) {
-		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-		*p = NULL;
-	}
-
-	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-	INIT_LIST_HEAD(&rt6->rt6i_siblings);
-	INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-	rt6->dst.input	= vrf_input6;
 	rt6->dst.output	= vrf_output6;
-
-	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-	atomic_set(&rt6->dst.__refcnt, 2);
-
+	rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+	dst_hold(&rt6->dst);
 	vrf->rt6 = rt6;
 	rc = 0;
 out:
 	return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
 	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-	dst_destroy(dst);
+	dst_release(dst);
 	vrf->rth = NULL;
 }
 
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct rtable *rth;
 
-	rth = dst_alloc(&vrf_dst_ops, dev, 2,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
 	if (rth) {
 		rth->dst.output	= vrf_output;
-		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
-		rth->rt_flags	= 0;
-		rth->rt_type	= RTN_UNICAST;
-		rth->rt_is_input = 0;
-		rth->rt_iif	= 0;
-		rth->rt_pmtu	= 0;
-		rth->rt_gateway	= 0;
-		rth->rt_uses_gateway = 0;
 		rth->rt_table_id = vrf->tb_id;
-		INIT_LIST_HEAD(&rth->rt_uncached);
-		rth->rt_uncached_list = NULL;
 	}
 
 	return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
 	struct net_device *port_dev;
 	struct list_head *iter;
 
-	vrf_rtable_destroy(vrf);
-	vrf_rt6_destroy(vrf);
+	vrf_rtable_release(vrf);
+	vrf_rt6_release(vrf);
 
 	netdev_for_each_lower_dev(dev, port_dev, iter)
 		vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
 	return 0;
 
 out_rth:
-	vrf_rtable_destroy(vrf);
+	vrf_rtable_release(vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);
 
 		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
+		dst_hold(&rth->dst);
 	}
 
 	return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);
 
 		rt = vrf->rt6;
-		atomic_inc(&rt->dst.__refcnt);
+		dst_hold(&rt->dst);
 	}
 
 	return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
 	int rc;
 
-	vrf_dst_ops.kmem_cachep =
-		kmem_cache_create("vrf_ip_dst_cache",
-				  sizeof(struct rtable), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-
-	if (!vrf_dst_ops.kmem_cachep)
-		return -ENOMEM;
-
-	rc = init_dst_ops6_kmem_cachep();
-	if (rc != 0)
-		goto error2;
-
 	register_netdevice_notifier(&vrf_notifier_block);
 
 	rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
 
 error:
 	unregister_netdevice_notifier(&vrf_notifier_block);
-	free_dst_ops6_kmem_cachep();
-error2:
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
 	return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-	rtnl_link_unregister(&vrf_link_ops);
-	unregister_netdevice_notifier(&vrf_notifier_block);
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-	free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
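The vrf conversion from dst_destroy() to dst_release() switches the cached routes to ordinary reference counting: the device holds one reference and drops it on uninit, and the dst is freed only once its last user is gone. The shape of the pattern, reduced to plain C (illustrative, not the kernel's dst API):

#include <stdatomic.h>
#include <stdlib.h>

struct cached_dst {
	atomic_int refcnt;
	/* ... route data ... */
};

static struct cached_dst *dst_hold_ex(struct cached_dst *d)
{
	atomic_fetch_add(&d->refcnt, 1);	/* like dst_hold() */
	return d;
}

static void dst_release_ex(struct cached_dst *d)
{
	/* like dst_release(): only the final put frees the object, so a
	 * device tearing down its cache cannot free a dst still in use.
	 */
	if (atomic_fetch_sub(&d->refcnt, 1) == 1)
		free(d);
}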
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af..b0603e7 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);
 
-bcma_out:
 	return err;
 
 bcma_err_wireless_exit:
 	ieee80211_free_hw(wl->hw);
+bcma_out:
+	kfree(dev);
 	return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
 	b43_rng_exit(wl);
 
 	b43_leds_unregister(wl);
-
 	ieee80211_free_hw(wl->hw);
+	kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
 	b43_leds_unregister(wl);
 	b43_wireless_exit(dev, wl);
+	kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c..a50f4df 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab79..d278399 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
-	iwl_free_fw_paging(mvm);
-
 	iwl_mvm_tof_clean(mvm);
 
 	ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e..b2b7935 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff..6a8245c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
 			rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-			RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-				 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-				 rtldm->thermalvalue, thermal_value);
+		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+			 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+			 rtldm->thermalvalue, thermal_value);
 
 	/*Record last Power Tracking Thermal Value*/
 	rtldm->thermalvalue = thermal_value;
 }