Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/appletalk/Kconfig | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/dev.c | 8
-rw-r--r--  drivers/net/can/flexcan.c | 18
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 176
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_ucan.h | 15
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 77
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 2
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 47
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 31
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 175
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 104
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 8
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 137
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 109
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 131
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 70
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 246
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 15
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 18
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 14
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 30
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 65
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 28
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 30
-rw-r--r--  drivers/net/macvtap.c | 7
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c | 82
-rw-r--r--  drivers/net/phy/phy.c | 23
-rw-r--r--  drivers/net/team/team.c | 10
-rw-r--r--  drivers/net/usb/Kconfig | 1
-rw-r--r--  drivers/net/usb/asix_common.c | 2
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 41
-rw-r--r--  drivers/net/usb/hso.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 5
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/sr9800.c | 1
-rw-r--r--  drivers/net/usb/usbnet.c | 17
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/cosa.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 2
-rw-r--r--  drivers/net/wireless/b43/main.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/feature.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/vendor.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 6
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 12
-rw-r--r--  drivers/net/xen-netback/interface.c | 3
-rw-r--r--  drivers/net/xen-netback/netback.c | 46
-rw-r--r--  drivers/net/xen-netfront.c | 5
138 files changed, 2135 insertions, 1197 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673eb..df51d60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
making it transparent to the connected L2 switch.
Ipvlan devices can be added using the "ip" command from the
- iproute2 package starting with the iproute2-X.Y.ZZ release:
+ iproute2 package starting with the iproute2-3.19 release:
"ip link add link <main-dev> [ NAME ] type ipvlan"
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5..dc6b78e 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
config LTPC
tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+ depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
help
This allows you to use the AppleTalk PC card to connect to LocalTalk
networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b979c26..089a402 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb->queue_mapping) {
- if (bond_slave_can_tx(slave)) {
+ if (bond_slave_is_up(slave) &&
+ slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aa..58808f6 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@ config CAN_RCAR
config CAN_XILINXCAN
tristate "Xilinx CAN"
- depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
depends on COMMON_CLK && HAS_IOMEM
---help---
Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c82e02..b0f6924 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 80c46ad..ad0a7e8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
new_state = max(tx_state, rx_state);
- } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+ } else {
__flexcan_get_berr_counter(dev, &bec);
- new_state = CAN_STATE_ERROR_PASSIVE;
+ new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+ CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
- } else {
- new_state = CAN_STATE_BUS_OFF;
}
/* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
+ struct regulator *reg_xceiver;
struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
void __iomem *base;
int err, irq;
u32 clock_freq = 0;
+ reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(reg_xceiver))
+ reg_xceiver = NULL;
+
if (pdev->dev.of_node)
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
- priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
- if (IS_ERR(priv->reg_xceiver))
- priv->reg_xceiver = NULL;
+ priv->reg_xceiver = reg_xceiver;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 009acc8..8b4d3e6 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
init_usb_anchor(&dev->rx_submitted);
atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 2928f70..57611fd 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,8 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -23,7 +25,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#define MAX_TX_URBS 16
#define MAX_RX_URBS 4
#define START_TIMEOUT 1000 /* msecs */
#define STOP_TIMEOUT 1000 /* msecs */
@@ -441,6 +442,7 @@ struct kvaser_usb_error_summary {
};
};
+/* Context for an outstanding, not yet ACKed, transmission */
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
@@ -454,8 +456,13 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+ /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+ * not yet ACKed, transmissions on this device. This value is
+ * also used as a sentinel for marking free tx contexts.
+ */
u32 fw_version;
unsigned int nchannels;
+ unsigned int max_tx_urbs;
enum kvaser_usb_family family;
bool rxinitdone;
@@ -465,18 +472,18 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
-
- atomic_t active_tx_urbs;
- struct usb_anchor tx_submitted;
- struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
- struct completion start_comp, stop_comp;
+ struct can_berr_counter bec;
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct can_berr_counter bec;
+ struct completion start_comp, stop_comp;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t tx_contexts_lock;
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
};
static const struct usb_device_id kvaser_usb_table[] = {
@@ -584,8 +591,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
while (pos <= actual_len - MSG_HEADER_LEN) {
tmp = buf + pos;
- if (!tmp->len)
- break;
+ /* Handle messages crossing the USB endpoint max packet
+ * size boundary. Check kvaser_usb_read_bulk_callback()
+ * for further details.
+ */
+ if (tmp->len == 0) {
+ pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+ wMaxPacketSize));
+ continue;
+ }
if (pos + tmp->len > actual_len) {
dev_err(dev->udev->dev.parent,
@@ -647,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
switch (dev->family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
break;
}
@@ -686,6 +704,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
struct sk_buff *skb;
struct can_frame *cf;
+ unsigned long flags;
u8 channel, tid;
channel = msg->u.tx_acknowledge_header.channel;
@@ -704,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
- context = &priv->tx_contexts[tid % MAX_TX_URBS];
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms &&
@@ -729,12 +748,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
stats->tx_packets++;
stats->tx_bytes += context->dlc;
- can_get_echo_skb(priv->netdev, context->echo_index);
- context->echo_index = MAX_TX_URBS;
- atomic_dec(&priv->active_tx_urbs);
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
}
static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -787,7 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
netdev_err(netdev, "Error transmitting URB\n");
usb_unanchor_urb(urb);
usb_free_urb(urb);
- kfree(buf);
return err;
}
@@ -796,17 +817,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
return 0;
}
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
- int i;
-
- usb_kill_anchored_urbs(&priv->tx_submitted);
- atomic_set(&priv->active_tx_urbs, 0);
-
- for (i = 0; i < MAX_TX_URBS; i++)
- priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-}
-
static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_error_summary *es,
struct can_frame *cf)
@@ -1317,8 +1327,20 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
while (pos <= urb->actual_length - MSG_HEADER_LEN) {
msg = urb->transfer_buffer + pos;
- if (!msg->len)
- break;
+ /* The Kvaser firmware can only read and write messages that
+ * do not cross the USB's endpoint wMaxPacketSize boundary.
+ * If a follow-up command crosses such boundary, firmware puts
+ * a placeholder zero-length command in its place then aligns
+ * the real command to the next max packet size.
+ *
+ * Handle such cases or we're going to miss a significant
+ * number of events in case of a heavy rx load on the bus.
+ */
+ if (msg->len == 0) {
+ pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+ wMaxPacketSize));
+ continue;
+ }
if (pos + msg->len > urb->actual_length) {
dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1348,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
}
kvaser_usb_handle_message(dev, msg);
-
pos += msg->len;
}
@@ -1498,6 +1519,26 @@ error:
return err;
}
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+ int i, max_tx_urbs;
+
+ max_tx_urbs = priv->dev->max_tx_urbs;
+
+ priv->active_tx_contexts = 0;
+ for (i = 0; i < max_tx_urbs; i++)
+ priv->tx_contexts[i].echo_index = max_tx_urbs;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
{
int i;
@@ -1615,9 +1656,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
struct urb *urb;
void *buf;
struct kvaser_msg *msg;
- int i, err;
- int ret = NETDEV_TX_OK;
+ int i, err, ret = NETDEV_TX_OK;
u8 *msg_tx_can_flags = NULL; /* GCC */
+ unsigned long flags;
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -1634,7 +1675,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
- goto nobufmem;
+ goto freeurb;
}
msg = buf;
@@ -1671,22 +1712,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
- for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
- if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ for (i = 0; i < dev->max_tx_urbs; i++) {
+ if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
context = &priv->tx_contexts[i];
+
+ context->echo_index = i;
+ can_put_echo_skb(skb, netdev, context->echo_index);
+ ++priv->active_tx_contexts;
+ if (priv->active_tx_contexts >= dev->max_tx_urbs)
+ netif_stop_queue(netdev);
+
break;
}
}
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
/* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
+
+ kfree(buf);
ret = NETDEV_TX_BUSY;
- goto releasebuf;
+ goto freeurb;
}
context->priv = priv;
- context->echo_index = i;
context->dlc = cf->can_dlc;
msg->u.tx_can.tid = context->echo_index;
@@ -1698,18 +1749,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
kvaser_usb_write_bulk_callback, context);
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
-
- atomic_inc(&priv->active_tx_urbs);
-
- if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
- netif_stop_queue(netdev);
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err)) {
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
- atomic_dec(&priv->active_tx_urbs);
usb_unanchor_urb(urb);
stats->tx_dropped++;
@@ -1719,16 +1769,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
else
netdev_warn(netdev, "Failed tx_urb %d\n", err);
- goto releasebuf;
+ goto freeurb;
}
- usb_free_urb(urb);
-
- return NETDEV_TX_OK;
+ ret = NETDEV_TX_OK;
-releasebuf:
- kfree(buf);
-nobufmem:
+freeurb:
usb_free_urb(urb);
return ret;
}
@@ -1840,13 +1886,15 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
struct kvaser_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
- int i, err;
+ int err;
err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
if (err)
return err;
- netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+ netdev = alloc_candev(sizeof(*priv) +
+ dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ dev->max_tx_urbs);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
return -ENOMEM;
@@ -1854,19 +1902,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
priv = netdev_priv(netdev);
+ init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
- init_usb_anchor(&priv->tx_submitted);
- atomic_set(&priv->active_tx_urbs, 0);
-
- for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
- priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-
priv->dev = dev;
priv->netdev = netdev;
priv->channel = channel;
+ spin_lock_init(&priv->tx_contexts_lock);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = CAN_USB_CLOCK;
priv->can.bittiming_const = &kvaser_usb_bittiming_const;
@@ -1976,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+ dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+
err = kvaser_usb_get_card_info(dev);
if (err) {
dev_err(&intf->dev,
@@ -1983,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
-
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(intf, id, i);
if (err) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index 1ba7c25..e8fc495 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -26,8 +26,8 @@
#define PUCAN_CMD_FILTER_STD 0x008
#define PUCAN_CMD_TX_ABORT 0x009
#define PUCAN_CMD_WR_ERR_CNT 0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
+#define PUCAN_CMD_SET_EN_OPTION 0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
/* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
u16 unused;
};
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR 0x0001
-#define PUCAN_FLTEXT_BUSLOAD 0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR 0x0001
+#define PUCAN_OPTION_BUSLOAD 0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
__le16 opcode_channel;
- __le16 ext_mask;
+ __le16 options;
u32 unused;
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 962c3f0..a9221ad 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
u8 unused[5];
};
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
__le16 opcode_channel;
- __le16 ext_mask;
+ __le16 ucan_mask;
u16 unused;
__le16 usb_mask;
};
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
/* moves the pointer forward */
pc += sizeof(struct pucan_wr_err_cnt);
+ /* add command to switch from ISO to non-ISO mode, if fw allows it */
+ if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+ struct pucan_options *puo = (struct pucan_options *)pc;
+
+ puo->opcode_channel =
+ (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+ pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_CLR_DIS_OPTION) :
+ pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+ puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+ /* to be sure that no other extended bits will be taken into
+ * account
+ */
+ puo->unused = 0;
+
+ /* moves the pointer forward */
+ pc += sizeof(struct pucan_options);
+ }
+
/* next, go back to operational mode */
cmd = (struct pucan_command *)pc;
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
return pcan_usb_fd_send_cmd(dev, cmd);
}
-/* set/unset notifications filter:
+/* set/unset options
*
- * onoff sets(1)/unset(0) notifications
- * mask each bit defines a kind of notification to set/unset
+ * onoff set(1)/unset(0) options
+ * mask each bit defines a kind of options to set/unset
*/
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
- bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+ bool onoff, u16 ucan_mask, u16 usb_mask)
{
- struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+ struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
- (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
- PUCAN_CMD_RX_FRAME_DISABLE);
+ (onoff) ? PUCAN_CMD_SET_EN_OPTION :
+ PUCAN_CMD_CLR_DIS_OPTION);
- cmd->ext_mask = cpu_to_le16(ext_mask);
+ cmd->ucan_mask = cpu_to_le16(ucan_mask);
cmd->usb_mask = cpu_to_le16(usb_mask);
/* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
&pcan_usb_pro_fd);
/* enable USB calibration messages */
- err = pcan_usb_fd_set_filter_ext(dev, 1,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ err = pcan_usb_fd_set_options(dev, 1,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
}
pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
/* turn off special msgs for that interface if no other dev opened */
if (pdev->usb_if->dev_opened_count == 1)
- pcan_usb_fd_set_filter_ext(dev, 0,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ pcan_usb_fd_set_options(dev, 0,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
pdev->usb_if->dev_opened_count--;
return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
pdev->usb_if->fw_info.fw_version[2],
dev->adapter->ctrl_count);
- /* the currently supported hw is non-ISO */
- dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+ /* check for ability to switch between ISO/non-ISO modes */
+ if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ /* firmware >= 2.x supports ISO/non-ISO switching */
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+ } else {
+ /* firmware < 2.x only supports fixed(!) non-ISO */
+ dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+ }
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -879,6 +906,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
pdev->usb_if = ppdev->usb_if;
pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+
+ /* do a copy of the ctrlmode[_supported] too */
+ dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
+ dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
@@ -933,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
if (dev->ctrl_idx == 0) {
/* turn off calibration message if any device were opened */
if (pdev->usb_if->dev_opened_count > 0)
- pcan_usb_fd_set_filter_ext(dev, 0,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ pcan_usb_fd_set_options(dev, 0,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
/* tell USB adapter that the driver is being unloaded */
pcan_usb_fd_drv_loaded(dev, 0);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index ee9f650..7b7053d 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
u32 indir, dir; \
spin_lock(&priv->indir_lock); \
- indir = reg_readl(priv, REG_DIR_DATA_READ); \
dir = __raw_readl(priv->name + off); \
+ indir = reg_readl(priv, REG_DIR_DATA_READ); \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05..ec6eac1 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
link->open++;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ax_open(dev);
} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d..2777289 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ei_open(dev);
} /* pcnet_open */
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c..6725dc0 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+ (count < limit)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
int rxcomplete = 0;
- int txcomplete = 0;
unsigned long int flags;
- txcomplete = tse_tx_complete(priv);
+ tse_tx_complete(priv);
rxcomplete = tse_rx(priv, budget);
- if (rxcomplete >= budget || txcomplete > 0)
- return rxcomplete;
+ if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
- netdev_dbg(priv->dev,
- "NAPI Complete, did %d packets with budget %d\n",
- txcomplete+rxcomplete, budget);
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ rxcomplete, budget);
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
- priv->dmaops->enable_rxirq(priv);
- priv->dmaops->enable_txirq(priv);
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- return rxcomplete + txcomplete;
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ }
+ return rxcomplete;
}
/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct altera_tse_private *priv;
- unsigned long int flags;
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
}
priv = netdev_priv(dev);
- /* turn off desc irqs and enable napi rx */
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ spin_lock(&priv->rxdma_irq_lock);
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
if (likely(napi_schedule_prep(&priv->napi))) {
+ spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
- /* reset IRQs */
- priv->dmaops->clear_rxirq(priv);
- priv->dmaops->clear_txirq(priv);
-
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
return IRQ_HANDLED;
}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &priv->rx_fifo_depth)) {
+ &priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
goto err_free_netdev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e65..15a8190 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
struct pcnet32_private *lp;
int i, media;
- int fdx, mii, fset, dxsuflo;
+ int fdx, mii, fset, dxsuflo, sram;
int chip_version;
char *chipname;
struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* initialize variables */
- fdx = mii = fset = dxsuflo = 0;
+ fdx = mii = fset = dxsuflo = sram = 0;
chip_version = (chip_version >> 12) & 0xffff;
switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C973"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2626:
chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C975"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2628:
chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dxsuflo = 1;
}
+ /*
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
+ * which we can use for the Tx/Rx buffers but most importantly,
+ * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
+ * Tx fifo underflows.
+ */
+ if (sram) {
+ /*
+ * The SRAM is being configured in two steps. First we
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+ * to the datasheet, each bit corresponds to a 512-byte
+ * page so we can have at most 24 pages. The SRAM_SIZE
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
+ * The low 8-bits start at 0x00 and end at 0xff. So the
+ * address range is from 0x0000 up to 0x17ff. Therefore,
+ * the SRAM_SIZE is set to 0x17. The next step is to set
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
+ * buffers can share the SRAM equally.
+ */
+ a->write_bcr(ioaddr, 25, 0x17);
+ a->write_bcr(ioaddr, 26, 0xc);
+ /* And finally enable the NOUFLO bit */
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+ }
+
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d440..885b02b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
}
}
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
return -EINVAL;
}
- phy_stop(pdata->phydev);
-
spin_lock_irqsave(&pdata->lock, flags);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 0);
- /* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
phy_start(pdata->phydev);
- /* Enable Tx/Rx */
+ xgbe_napi_enable(pdata, 0);
+
hw_if->powerup_tx(pdata);
hw_if->powerup_rx(pdata);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_attach(netdev);
- xgbe_napi_enable(pdata, 0);
netif_tx_start_all_queues(netdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct net_device *netdev = pdata->netdev;
+ int ret;
DBGPR("-->xgbe_start\n");
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
phy_start(pdata->phydev);
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
xgbe_init_tx_timers(pdata);
- xgbe_napi_enable(pdata, 1);
netif_tx_start_all_queues(netdev);
DBGPR("<--xgbe_start\n");
return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
- phy_stop(pdata->phydev);
-
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int i;
-
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->dev_irq);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- synchronize_irq(channel->dma_irq);
- }
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
xgbe_start(pdata);
DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel = NULL;
- unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- /* Request interrupts */
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
- goto err_rings;
- }
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev,
- "error requesting irq %d\n",
- channel->dma_irq);
- goto err_irq;
- }
- }
- }
-
ret = xgbe_start(pdata);
if (ret)
- goto err_start;
+ goto err_rings;
DBGPR("<--xgbe_open\n");
return 0;
-err_start:
- hw_if->exit(pdata);
-
-err_irq:
- if (pdata->per_channel_irq) {
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
err_rings:
desc_if->free_ring_resources(pdata);
@@ -1399,30 +1424,16 @@ err_phy_init:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- unsigned int i;
DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupts */
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97f..b927021 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!efi_enabled(EFI_BOOT)) {
+ if (pdata->clk) {
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b2..635a83b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", },
+ { "APMC0D30", },
+ { "APMC0D31", },
{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
+ {.compatible = "apm,xgene1-sgenet",},
+ {.compatible = "apm,xgene1-xgenet",},
{},
};
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d3..a7f2cc3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- int tx_work_done, rx_work_done;
+ int rx_work_done;
priv = container_of(napi, struct bcm_enet_priv, napi);
dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
spin_unlock(&priv->rx_lock);
- if (rx_work_done >= budget || tx_work_done > 0) {
- /* rx/tx queue is not yet empty/clean */
+ if (rx_work_done >= budget) {
+ /* rx queue is not yet empty/clean */
return rx_work_done;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4..783543a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
- STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
s = &bcm_sysport_gstrings_stats[i];
switch (s->type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_SOFT:
continue;
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417..7e3d87a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_SOFT,
};
/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
#define STAT_RXCHK(str, m, ofs) { \
.stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe0..0469f72 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
slot->skb = skb;
slot->dma_addr = dma_addr;
- if (slot->dma_addr & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
return 0;
}
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring->mmio_base);
goto err_dma_free;
}
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
err = -ENOMEM;
goto err_dma_free;
}
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 756053c..4085c4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1811,7 +1811,7 @@ struct bnx2x {
int stats_state;
/* used for synchronization of concurrent threads statistics handling */
- spinlock_t stats_lock;
+ struct mutex stats_lock;
/* used by dmae command loader */
struct dmae_command stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
- bool stats_started;
- struct semaphore stats_sema;
u8 phys_port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d..1ec635f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
u32 xmac_val;
u32 emac_addr;
u32 emac_val;
- u32 umac_addr;
- u32 umac_val;
+ u32 umac_addr[2];
+ u32 umac_val[2];
u32 bmac_addr;
u32 bmac_val[2];
};
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
return 0;
}
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_ABS_FUNC(bp));
+}
+
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
- if (!CHIP_IS_E1x(bp))
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+ bnx2x_clean_pglue_errors(bp);
bnx2x_init_block(bp, BLOCK_ATC, init_phase);
bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
return base + (BP_ABS_FUNC(bp)) * stride;
}
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+ u8 port, u32 reset_reg,
+ struct bnx2x_mac_vals *vals)
+{
+ u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+ u32 base_addr;
+
+ if (!(mask & reset_reg))
+ return false;
+
+ BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+ base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+ REG_WR(bp, vals->umac_addr[port], 0);
+
+ return true;
+}
+
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
struct bnx2x_mac_vals *vals)
{
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
u8 port = BP_PORT(bp);
/* reset addresses as they also mark which values were changed */
- vals->bmac_addr = 0;
- vals->umac_addr = 0;
- vals->xmac_addr = 0;
- vals->emac_addr = 0;
+ memset(vals, 0, sizeof(*vals));
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
- mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
- if (mask & reset_reg) {
- BNX2X_DEV_INFO("Disable umac Rx\n");
- base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
- vals->umac_val = REG_RD(bp, vals->umac_addr);
- REG_WR(bp, vals->umac_addr, 0);
- mac_stopped = true;
- }
+
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+ reset_reg, vals);
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+ reset_reg, vals);
}
if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
- /* close LLH filters towards the BRB */
+ /* close LLH filters for both ports towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
/* Check if the UNDI driver was previously loaded */
if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
if (mac_vals.xmac_addr)
REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
- if (mac_vals.umac_addr)
- REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.umac_addr[0])
+ REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+ if (mac_vals.umac_addr[1])
+ REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
if (mac_vals.emac_addr)
REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
return bnx2x_prev_mcp_done(bp);
}
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
- if (!CHIP_IS_E1x(bp)) {
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- DP(BNX2X_MSG_SP,
- "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
- 1 << BP_FUNC(bp));
- }
- }
-}
-
static int bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
/* clear hw from errors which may have resulted from an interrupted
* dmae transaction.
*/
- bnx2x_prev_interrupted_dmae(bp);
+ bnx2x_clean_pglue_errors(bp);
/* Release previously held locks */
hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
mutex_init(&bp->drv_info_mutex);
+ mutex_init(&bp->stats_lock);
bp->drv_info_mng_owner = false;
- spin_lock_init(&bp->stats_lock);
- sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12722,6 +12731,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+ /* Set PCIe reset type to fundamental for EEH recovery */
+ pdev->needs_freset = 1;
+
/* AER (Advanced Error reporting) configuration */
rc = pci_enable_pcie_error_reporting(pdev);
if (!rc)
@@ -12766,7 +12778,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
- if (!CHIP_IS_E1x(bp)) {
+ if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
dev->hw_enc_features =
@@ -13665,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
- spin_lock_bh(&bp->stats_lock);
+ mutex_lock(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
- spin_unlock_bh(&bp->stats_lock);
+ mutex_unlock(&bp->stats_lock);
bnx2x_save_statistics(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e5aca2d..cfe3c769 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
cookie.vf = vf;
cookie.state = VF_ACQUIRED;
- bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ if (rc)
+ goto op_err;
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d160829..800ab44 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
*/
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
- if (!bp->stats_pending) {
- int rc;
+ int rc;
- spin_lock_bh(&bp->stats_lock);
-
- if (bp->stats_pending) {
- spin_unlock_bh(&bp->stats_lock);
- return;
- }
-
- bp->fw_stats_req->hdr.drv_stats_counter =
- cpu_to_le16(bp->stats_counter++);
+ if (bp->stats_pending)
+ return;
- DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
- le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
- /* adjust the ramrod to include VF queues statistics */
- bnx2x_iov_adjust_stats_req(bp);
- bnx2x_dp_stats(bp);
+ DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+ le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
- /* send FW stats ramrod */
- rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
- U64_HI(bp->fw_stats_req_mapping),
- U64_LO(bp->fw_stats_req_mapping),
- NONE_CONNECTION_TYPE);
- if (rc == 0)
- bp->stats_pending = 1;
+ /* adjust the ramrod to include VF queues statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
- spin_unlock_bh(&bp->stats_lock);
- }
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0)
+ bp->stats_pending = 1;
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
*/
/* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
}
/* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
{
if (IS_PF(bp)) {
if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
}
-
- bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- __bnx2x_stats_pmf_update(bp);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
- __bnx2x_stats_pmf_update(bp);
- up(&bp->stats_sema);
+ bnx2x_stats_pmf_update(bp);
+ bnx2x_stats_start(bp);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
+
bnx2x_stats_comp(bp);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
+ bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- /* we run update from timer context, so give up
- * if somebody is in the middle of transition
- */
- if (down_trylock(&bp->stats_sema))
+ if (bnx2x_edebug_stats_stopped(bp))
return;
- if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
- goto out;
-
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- goto out;
+ return;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- goto out;
+ return;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
- goto out;
+ return;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
-
-out:
- up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
static void bnx2x_stats_stop(struct bnx2x *bp)
{
- int update = 0;
-
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
-
- bp->stats_started = false;
+ bool update = false;
bnx2x_stats_comp(bp);
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
-
- up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
- enum bnx2x_stats_state state;
- void (*action)(struct bnx2x *bp);
+ enum bnx2x_stats_state state = bp->stats_state;
+
if (unlikely(bp->panic))
return;
- spin_lock_bh(&bp->stats_lock);
- state = bp->stats_state;
+ /* Statistics updates run from timer context, and we don't want to stop
+ * that context in case someone is in the middle of a transition.
+ * For other events, wait a bit until the lock is taken.
+ */
+ if (!mutex_trylock(&bp->stats_lock)) {
+ if (event == STATS_EVENT_UPDATE)
+ return;
+
+ DP(BNX2X_MSG_STATS,
+ "Unlikely stats' lock contention [event %d]\n", event);
+ mutex_lock(&bp->stats_lock);
+ }
+
+ bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
- action = bnx2x_stats_stm[state][event].action;
- spin_unlock_bh(&bp->stats_lock);
- action(bp);
+ mutex_unlock(&bp->stats_lock);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
}
}
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
- void (func_to_exec)(void *cookie),
- void *cookie){
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie)
+{
+ int cnt = 10, rc = 0;
+
+ /* Wait for statistics to end [while blocking further requests],
+ * then run supplied function 'safely'.
+ */
+ mutex_lock(&bp->stats_lock);
+
bnx2x_stats_comp(bp);
+ while (bp->stats_pending && cnt--)
+ if (bnx2x_storm_stats_update(bp))
+ usleep_range(1000, 2000);
+ if (bp->stats_pending) {
+ BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
func_to_exec(cookie);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
+
+out:
+ /* No need to restart statistics - if they're enabled, the timer
+ * will restart the statistics.
+ */
+ mutex_unlock(&bp->stats_lock);
+
+ return rc;
}
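Because bnx2x_stats_safe_exec() can now fail with -EBUSY, callers must check its return value, as the bnx2x_vf_close() hunk earlier in this diff now does. A hedged usage sketch with a hypothetical callback:

	/* Illustrative sketch only: a trivial callback run 'safely' while
	 * statistics are quiesced, plus the new error handling.
	 */
	static void mark_done(void *cookie)
	{
		*(bool *)cookie = true;
	}

	static int run_under_stats_lock(struct bnx2x *bp)
	{
		bool done = false;
		int rc;

		rc = bnx2x_stats_safe_exec(bp, mark_done, &done);
		if (rc)		/* -EBUSY: stats_pending never cleared */
			return rc;

		return 0;
	}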
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2beceae..965539a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,9 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
- void (func_to_exec)(void *cookie),
- void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46b..6befde6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
+ BCMGENET_STAT_SOFT,
};
struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
#define STAT_GENET_MISC(str, m, offset) { \
.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
- STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_SOFT:
continue;
case BCMGENET_STAT_MIB_RX:
case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
}
/* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int pkts_compl = 0;
unsigned int bds_compl;
unsigned int c_index;
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = ring->cbs + last_c_index;
bds_compl = 0;
if (tx_cb_ptr->skb) {
+ pkts_compl++;
bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
last_c_index &= (num_tx_bds - 1);
}
- if (ring->free_bds > (MAX_SKB_FRAGS + 1))
- ring->int_disable(priv, ring);
-
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+ }
ring->c_index = c_index;
+
+ return pkts_compl;
}
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ unsigned int released;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
- __bcmgenet_tx_reclaim(dev, ring);
+ released = __bcmgenet_tx_reclaim(dev, ring);
spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_tx_ring *ring =
+ container_of(napi, struct bcmgenet_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+ if (work_done == 0) {
+ napi_complete(napi);
+ ring->int_enable(ring->priv, ring);
+
+ return 0;
+ }
+
+ return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
- if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- ring->int_enable(priv, ring);
- }
out:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
struct device *kdev = &priv->pdev->dev;
int ret;
u32 reg, cpu_mask_clear;
+ int index;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ for (index = 0; index < priv->hw_params->tx_queues; index++)
+ bcmgenet_intrl2_1_writel(priv, (1 << index),
+ INTRL2_CPU_MASK_CLEAR);
+
/* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
unsigned int first_bd;
spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
ring->index = index;
if (index == DESC_INDEX) {
ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
+
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
return ret;
}
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
kfree(priv->tx_cbs);
}
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_fini_tx_ring(priv, i);
+
+ __bcmgenet_fini_dma(priv);
+}
+
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
GFP_KERNEL);
if (!priv->tx_cbs) {
- bcmgenet_fini_dma(priv);
+ __bcmgenet_fini_dma(priv);
return -ENOMEM;
}
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
struct bcmgenet_priv, napi);
unsigned int work_done;
- /* tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
work_done = bcmgenet_desc_rx(priv, budget);
/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
+ struct bcmgenet_tx_ring *ring;
unsigned int index;
/* Save irq status for bottom-half processing. */
priv->irq1_stat =
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
- ~priv->int1_mask;
+ ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
/* Check the MBDONE interrupts.
* packet is done, reclaim descriptors
*/
- if (priv->irq1_stat & 0x0000ffff) {
- index = 0;
- for (index = 0; index < 16; index++) {
- if (priv->irq1_stat & (1 << index))
- bcmgenet_tx_reclaim(priv->dev,
- &priv->tx_rings[index]);
+ for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ if (!(priv->irq1_stat & BIT(index)))
+ continue;
+
+ ring = &priv->tx_rings[index];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
}
}
+
return IRQ_HANDLED;
}
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
if (priv->irq0_stat &
(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
- /* Tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
+ }
}
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec..0d370d1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
+ struct napi_struct napi; /* NAPI per tx queue */
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
struct bcmgenet_tx_ring *);
void (*int_disable)(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *);
+ struct bcmgenet_priv *priv;
};
/* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d7..b971229 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
return -EINVAL;
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (wol->wolopts & WAKE_MAGICSECURE) {
bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
UMAC_MPD_PW_MS);
bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
UMAC_MPD_PW_LS);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_PW_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ } else {
+ reg &= ~MPD_PW_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e..81d4153 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
};
#if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
+static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
};
-static struct macb_config sama5d3_config = {
+static const struct macb_config sama5d3_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
};
-static struct macb_config sama5d4_config = {
+static const struct macb_config sama5d4_config = {
.caps = 0,
.dma_burst_length = 4,
};
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
if (bp->pdev->dev.of_node) {
match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
if (match && match->data) {
- config = (const struct macb_config *)match->data;
+ config = match->data;
bp->caps = config->caps;
/*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080..ff85619 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
/* Bitfields in MID */
#define MACB_IDNUM_OFFSET 16
-#define MACB_IDNUM_SIZE 16
+#define MACB_IDNUM_SIZE 12
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a84..c308429 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
}
static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
- int addr_len)
+ u8 v6)
{
- return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
- ipv6_clip_hash(ctbl, addr);
+ return v6 ? ipv6_clip_hash(ctbl, addr) :
+ ipv4_clip_hash(ctbl, addr);
}
static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
- int ret = 0;
+ int ret = -1;
if (!ctbl)
return 0;
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
-
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
spin_lock_init(&ce->lock);
atomic_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
- ce->addr_len = addr_len;
- memcpy(ce->addr, lip, addr_len);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
+ ce->addr6.sin6_family = AF_INET6;
+ memcpy(ce->addr6.sin6_addr.s6_addr,
+ lip, sizeof(struct in6_addr));
ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
if (ret) {
write_unlock_bh(&ctbl->lock);
return ret;
}
+ } else {
+ ce->addr.sin_family = AF_INET;
+ memcpy((char *)(&ce->addr.sin_addr), lip,
+ sizeof(struct in_addr));
}
} else {
write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
-
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
+ int ret = -1;
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
for (i = 0 ; i < ctbl->clipt_size; ++i) {
list_for_each_entry(ce, &ctbl->hash_list[i], list) {
ip[0] = '\0';
- if (ce->addr_len == 16)
- sprintf(ip, "%pI6c", ce->addr);
- else
- sprintf(ip, "%pI4c", ce->addr);
+ sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip,
atomic_read(&ce->refcnt));
}
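The switch to "%pISc" above works because each CLIP entry now carries a full struct sockaddr_in or struct sockaddr_in6 (see the clip_tbl.h hunk below), and that format specifier dispatches on sa_family. A small illustrative sketch, not taken from the patch:

	/* Illustrative sketch only: with the union of sockaddr types, one
	 * format string covers both address families.
	 */
	static void clip_entry_show_sketch(struct seq_file *seq,
					   const struct clip_entry *ce)
	{
		char ip[60];

		sprintf(ip, "%pISc", &ce->addr);	/* IPv4 or IPv6 */
		seq_printf(seq, "%-25s %u\n", ip, atomic_read(&ce->refcnt));
	}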
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba01..35eb43c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */
atomic_t refcnt;
struct list_head list;
- u32 addr[4];
- int addr_len;
+ union {
+ struct sockaddr_in addr;
+ struct sockaddr_in6 addr6;
+ };
};
struct clip_tbl {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17..c6ff489 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -376,8 +376,6 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
- + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
@@ -616,11 +614,13 @@ struct sge {
unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
unsigned int egr_start;
+ unsigned int egr_sz;
unsigned int ingr_start;
- void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
- struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
- DECLARE_BITMAP(starving_fl, MAX_EGRQ);
- DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+ unsigned int ingr_sz;
+ void **egr_map; /* qid->queue egress queue map */
+ struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+ unsigned long *starving_fl;
+ unsigned long *txq_maperr;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
- __be32 *buf, int dir);
+ void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
u32 len, __be32 *buf)
{
@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qtimer_val(const struct adapter *adap,
const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 78854ce..dcb0479 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
"0.9375" };
int i;
- u16 incr[NMTUS][NCCTRL_WIN];
+ u16 (*incr)[NCCTRL_WIN];
struct adapter *adap = seq->private;
+ incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+ if (!incr)
+ return -ENOMEM;
+
t4_read_cong_tbl(adap, incr);
for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
adap->params.a_wnd[i],
dec_fac[adap->params.b_wnd[i]]);
}
+
+ kfree(incr);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a22cf93..d929951 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler) {
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
}
}
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+ if (adap->flags & FULL_INIT_DONE) {
+ t4_intr_disable(adap);
+ if (adap->flags & USING_MSIX) {
+ free_msix_queue_irqs(adap);
+ free_irq(adap->msix_info[0].vec, adap);
+ } else {
+ free_irq(adap->pdev->irq, adap);
+ }
+ quiesce_rx(adap);
+ }
+}
+
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (!q)
@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
int err, msi_idx, i, j;
struct sge *s = &adap->sge;
- bitmap_zero(s->starving_fl, MAX_EGRQ);
- bitmap_zero(s->txq_maperr, MAX_EGRQ);
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = -((int)s->intrq.abs_id + 1);
}
+ /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+ * don't forget to update the following, which need to be
+ * kept in sync with any changes here.
+ *
+ * 1. The calculations of MAX_INGQ in cxgb4.h.
+ *
+ * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+ * to accommodate any new/deleted Ingress Queues
+ * which need MSI-X Vectors.
+ *
+ * 3. Update sge_qinfo_show() to include information on the
+ * new/deleted queues.
+ */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler);
if (err) {
@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
- t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
- if (adapter->flags & USING_MSIX) {
- free_msix_queue_irqs(adapter);
- free_irq(adapter->msix_info[0].vec, adapter);
- } else
- free_irq(adapter->pdev->irq, adapter);
- quiesce_rx(adapter);
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
adapter->flags &= ~FULL_INIT_DONE;
@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
- 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+ FW_CMD_CAP_PF);
if (ret < 0)
return ret;
@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- struct fw_devlog_cmd devlog_cmd;
- u32 devlog_meminfo;
int reset = 1;
+ /* Grab Firmware Device Log parameters as early as possible so we have
+ * access to it for debugging, etc.
+ */
+ ret = t4_init_devlog_params(adap);
+ if (ret < 0)
+ return ret;
+
/* Contact FW, advertising Master capability */
ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
if (ret < 0) {
@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- /* Read firmware device log parameters. We really need to find a way
- * to get these parameters initialized with some default values (which
- * are likely to be correct) for the case where we either don't
- * attache to the firmware or it's crashed when we probe the adapter.
- * That way we'll still be able to perform early firmware startup
- * debugging ... If the request to get the Firmware's Device Log
- * parameters fails, we'll live so we don't make that a fatal error.
- */
- memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
- ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
- &devlog_cmd);
- if (ret == 0) {
- devlog_meminfo =
- ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
- adap->params.devlog.memtype =
- FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
- adap->params.devlog.start =
- FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
- }
-
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ /* qids (ingress/egress) returned from firmware can be anywhere
+ * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+ * Hence the driver needs to allocate memory for this range to
+ * store the queue info. Get the highest IQFLINT/EQ index returned
+ * in the FW_EQ_*_CMD.alloc command.
+ */
+ params[0] = FW_PARAM_PFVF(EQ_END);
+ params[1] = FW_PARAM_PFVF(IQFLINT_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+ adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+ adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+ sizeof(*adap->sge.egr_map), GFP_KERNEL);
+ if (!adap->sge.egr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+ sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+ if (!adap->sge.ingr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ /* Allocate the memory for the various egress queue bitmaps,
+ * i.e. starving_fl and txq_maperr.
+ */
+ adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.starving_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.txq_maperr) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
* happened to HW/FW, stop issuing commands.
*/
bye:
+ kfree(adap->sge.egr_map);
+ kfree(adap->sge.ingr_map);
+ kfree(adap->sge.starving_fl);
+ kfree(adap->sge.txq_maperr);
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
netif_carrier_off(dev);
}
spin_unlock(&adap->stats_lock);
+ disable_interrupts(adap);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
+ kfree(adapter->sge.egr_map);
+ kfree(adapter->sge.ingr_map);
+ kfree(adapter->sge.starving_fl);
+ kfree(adapter->sge.txq_maperr);
disable_msi(adapter);
for_each_port(adapter, i)
@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
if (is_offload(adapter))
detach_ulds(adapter);
+ disable_interrupts(adapter);
+
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b4b9f60..b688b32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
- for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+ for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->starving_fl[i]; m; m &= m - 1) {
struct sge_eth_rxq *rxq;
unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
- for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+ for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
/* clear the reverse egress queue map */
- memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+ memset(adap->sge.egr_map, 0,
+ adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b6..ee394dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
- * @buf: host memory buffer
+ * @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- u32 len, __be32 *buf, int dir)
+ u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
/* Argument sanity checks ...
*/
- if (addr & 0x3)
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
+ buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzel." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
- *buf++ = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+ mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
- (__force u32) *buf++);
+ (__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
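The long endianness note above boils down to one line of the loop: the value returned by the memory-window read is treated as little-endian and converted to CPU order before it is stored, so host memory ends up with the adapter's byte order on both LE and BE systems. A sketch of a single word of the read path (illustrative values, not from the driver):

	/* Illustrative sketch only. With adapter bytes b0..b3 = 11 22 33 44,
	 * t4_read_reg() yields 0x44332211; le32_to_cpu() is a no-op on LE and
	 * a byte swap on BE, so the store leaves 11 22 33 44 in host memory
	 * either way.
	 */
	static void mem_read_one_word(struct adapter *adap, u32 mem_base,
				      u32 offset, u32 *buf)
	{
		u32 reg = t4_read_reg(adap, mem_base + offset);

		*buf = le32_to_cpu((__force __le32)reg);
	}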
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
*/
if (resid) {
union {
- __be32 word;
+ u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
- last.word = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ last.word = le32_to_cpu(
+ (__force __le32)t4_read_reg(adap,
+ mem_base + offset));
for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
bp[i] = last.byte[i];
} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
- (__force u32) last.word);
+ (__force u32)cpu_to_le32(last.word));
}
}
@@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
}
/* Installed successfully, update the cached header too. */
- memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ *card_fw = *fs_fw;
card_fw_usable = 1;
*reset = 0; /* already reset as part of load_fw */
}
@@ -4425,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
+ * t4_init_devlog_params - initialize adapter->params.devlog
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's Firmware Device Log
+ * Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+ struct devlog_params *dparams = &adap->params.devlog;
+ u32 pf_dparams;
+ unsigned int devlog_meminfo;
+ struct fw_devlog_cmd devlog_cmd;
+ int ret;
+
+ /* If we're dealing with newer firmware, the Device Log Parameters
+ * are stored in a designated register which allows us to access the
+ * Device Log even if we can't talk to the firmware.
+ */
+ pf_dparams =
+ t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+ if (pf_dparams) {
+ unsigned int nentries, nentries128;
+
+ dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+ dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+ nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+ nentries = (nentries128 + 1) * 128;
+ dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+ return 0;
+ }
+
+ /* Otherwise, ask the firmware for its Device Log Parameters.
+ */
+ memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (ret)
+ return ret;
+
+ devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+ dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+ dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+ return 0;
+}
+
+/**
* t4_init_sge_params - initialize adap->params.sge
* @adapter: the adapter
*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 231a725..326674b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,6 +63,8 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
#define SGE_PF_KDOORBELL_A 0x0
#define QID_S 15
@@ -707,6 +709,7 @@
#define PFNUM_V(x) ((x) << PFNUM_S)
#define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 9b353a8..a4a19e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
- FW_LASTC2E_WR = 0x40
+ FW_LASTC2E_WR = 0x70
};
struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
FW_MEMTYPE_CF_EXTMEM = 0x2,
FW_MEMTYPE_CF_FLASH = 0x4,
FW_MEMTYPE_CF_INTERNAL = 0x5,
+ FW_MEMTYPE_CF_EXTMEM1 = 0x6,
};
struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
FW_PARAMS_MNEM_PFVF = 2, /* function params */
FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+ FW_PARAMS_MNEM_CHNET = 5, /* chnet params */
FW_PARAMS_MNEM_LAST
};
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
FW_DEVLOG_FACILITY_FCOE = 0x2E,
FW_DEVLOG_FACILITY_FOISCSI = 0x30,
FW_DEVLOG_FACILITY_FOFCOE = 0x32,
- FW_DEVLOG_FACILITY_MAX = 0x32,
+ FW_DEVLOG_FACILITY_CHNET = 0x34,
+ FW_DEVLOG_FACILITY_MAX = 0x34,
};
/* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
(((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+/* P C I E F W P F 7 R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters, which allow Host Drivers to
+ * access the "devlog" without needing to contact firmware. The encoding is
+ * mostly the same as that returned by the DEVLOG command, except that the size
+ * is encoded here as the number of entries in multiples of 128, minus one,
+ * rather than as the memory size used in the DEVLOG command. Thus, 0 means 128
+ * and 15 means 2048, which in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG 7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+ ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+ PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
#endif /* _T4FW_INTERFACE_H_ */
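The NENTRIES128 field described above encodes "(number of log entries / 128) - 1". A short decoding sketch using the new accessors, mirroring what t4_init_devlog_params() does in the t4_hw.c hunk earlier in this diff:

	/* Illustrative sketch only: decode the PF7 register into devlog
	 * parameters. 0 -> 128 entries, 15 -> 2048 entries.
	 */
	static void devlog_decode_sketch(u32 pf_dparams, unsigned int *memtype,
					 u32 *start, u32 *size)
	{
		unsigned int n128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);

		*memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		*start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
		*size = (n128 + 1) * 128 * sizeof(struct fw_devlog_e);
	}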
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index e2bd3f7..b9d1cba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,13 +36,13 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_MINOR 0x0D
+#define T4FW_VERSION_MICRO 0x20
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_MINOR 0x0D
+#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0545f0d..e0d7110 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
? (tq->pidx - 1)
: (tq->size - 1));
__be64 *src = (__be64 *)&tq->desc[index];
- __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+ __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
SGE_UDB_WCDOORBELL);
unsigned int count = EQ_UNIT / sizeof(__be64);
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* DMA.
*/
while (count) {
- writeq(*src, dst);
+ /* the (__force u64) is because the compiler
+ * doesn't understand the endian swizzling
+ * going on
+ */
+ writeq((__force u64)*src, dst);
src++;
dst++;
count--;
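The (__force u64) cast above exists only to satisfy sparse: *src already holds wire-format (big-endian) descriptor data that must reach the write-combined doorbell unchanged, so no cpu_to_be64()/be64_to_cpu() conversion belongs here. A hedged sketch of the copy loop:

	/* Illustrative sketch only: raw big-endian descriptor words are pushed
	 * through writeq() as-is; the cast tells sparse the missing byte-order
	 * conversion is intentional.
	 */
	static void copy_eq_unit_sketch(const __be64 *src, __be64 __iomem *dst,
					unsigned int count)
	{
		while (count--)
			writeq((__force u64)*src++, dst++);
	}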
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
wr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
- wr->r3[0] = cpu_to_be64(0);
- wr->r3[1] = cpu_to_be64(0);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
end = (u64 *)wr + flits;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1b5506d..280b4a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
if (rpl) {
/* request bit in high-order BE word */
- WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
- WARN_ON((be32_to_cpu(*(u32 *)rpl)
+ WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038..a5179bf 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, notify_intr)) {
- vnic_intr_return_all_credits(&enic->intr[notify_intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
}
if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
struct enic *enic = data;
unsigned int intr = enic_msix_notify_intr(enic);
- vnic_intr_return_all_credits(&enic->intr[intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[intr]);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556..ed41559 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
(unsigned int)tp->rx_ring[i].buffer1,
(unsigned int)tp->rx_ring[i].buffer2,
buf[0], buf[1], buf[2]);
- for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
if (j < 100)
pr_cont(" %02x", buf[j]);
pr_cont(" j=%d\n", j);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37a..27b9fe9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
u16 vlan_tag;
u32 tx_rate;
u32 plink_tracking;
+ u32 privileges;
};
enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cf..7f05f30 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
{
int num_eqs, i = 0;
- if (lancer_chip(adapter) && num > 8) {
- while (num) {
- num_eqs = min(num, 8);
- __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
- i += num_eqs;
- num -= num_eqs;
- }
- } else {
- __be_cmd_modify_eqd(adapter, set_eqd, num);
+ while (num) {
+ num_eqs = min(num, 8);
+ __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
}
return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
/* Uses sycnhronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num)
+ u32 num, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
wrb, NULL);
+ req->hdr.domain = domain;
req->interface_id = if_id;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e..a7634a3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
int be_cmd_get_fw_ver(struct be_adapter *adapter);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num);
+ u32 num, u32 domain);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a81685..e6b790f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
- status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ int vf_if_id = vf_cfg->if_handle;
+ int status;
+
+ /* Enable Transparent VLAN Tagging */
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ if (status)
+ return status;
+
+ /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
+ vids[0] = 0;
+ status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Cleared guest VLANs on VF%d", vf);
+
+ /* After TVT is enabled, disallow VFs to program VLAN filters */
+ if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
+ ~BE_PRIV_FILTMGMT, vf + 1);
+ if (!status)
+ vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
+ }
+ return 0;
+}
+
+static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ /* Reset Transparent VLAN Tagging. */
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+
+ /* Allow VFs to program VLAN filtering */
+ if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
+ BE_PRIV_FILTMGMT, vf + 1);
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+ dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
+ }
+ }
+
+ dev_info(dev,
+ "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
+ return 0;
+}
+
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- int status = 0;
+ int status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan)
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_set_vf_tvt(adapter, vf, vlan);
} else {
- /* Reset Transparent Vlan Tagging. */
- status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
- vf + 1, vf_cfg->if_handle, 0);
+ status = be_clear_vf_tvt(adapter, vf);
}
if (status) {
dev_err(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed : %#x\n", vlan,
- vf, status);
+ "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
+ status);
return be_cmd_status(status);
}
vf_cfg->vlan_tag = vlan;
-
return 0;
}
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
}
}
} else {
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
+ ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
+ ue_lo_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_LOW_MASK);
+ ue_hi_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_HI_MASK);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
u32 cap_flags, u32 vf)
{
u32 en_flags;
- int status;
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
en_flags &= cap_flags;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- if_handle, vf);
-
- return status;
+ return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}
static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
if (!BE3_chip(adapter)) {
status = be_cmd_get_profile_config(adapter, &res,
vf + 1);
- if (!status)
+ if (!status) {
cap_flags = res.if_cap_flags;
+ /* Prevent VFs from enabling VLAN promiscuous
+ * mode
+ */
+ cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
}
status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
- u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
/* Allow VFs to program MAC/VLAN filters */
- status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
- if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
+ vf + 1);
+ if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_set_fn_privileges(adapter,
- privileges |
+ vf_cfg->privileges |
BE_PRIV_FILTMGMT,
vf + 1);
- if (!status)
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
dev_info(dev, "VF%d has FILTMGMT privilege\n",
vf);
+ }
}
/* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
+ struct pci_dev *pdev = adapter->pdev;
u8 __iomem *addr;
if (BEx_chip(adapter) && be_physfn(adapter)) {
- adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+ adapter->csr = pci_iomap(pdev, 2, 0);
if (!adapter->csr)
return -ENOMEM;
}
- addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
+ addr = pci_iomap(pdev, db_bar(adapter), 0);
if (!addr)
goto pci_map_err;
adapter->db = addr;
+ if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
+ if (be_physfn(adapter)) {
+ /* PCICFG is the 2nd BAR in BE2 */
+ addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
+ if (!addr)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else {
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ }
+ }
+
be_roce_map_pci_bars(adapter);
return 0;
pci_map_err:
- dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
+ dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
be_unmap_pci_bars(adapter);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220..f6a3a7a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
struct fec_enet_private *fep;
- struct bufdesc *bdp, *bdp_t;
+ struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
struct fec_enet_priv_tx_q *txq;
struct netdev_queue *nq;
int index = 0;
- int i, bdnum;
int entries_free;
fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
if (bdp == txq->cur_tx)
break;
- bdp_t = bdp;
- bdnum = 1;
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
- skb = txq->tx_skbuff[index];
- while (!skb) {
- bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
- skb = txq->tx_skbuff[index];
- bdnum++;
- }
- if (skb_shinfo(skb)->nr_frags &&
- (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
- break;
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- for (i = 0; i < bdnum; i++) {
- if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
- if (i < bdnum - 1)
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
- }
+ skb = txq->tx_skbuff[index];
txq->tx_skbuff[index] = NULL;
+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
+ if (!skb) {
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ continue;
+ }
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
vlan_packet_rcvd = true;
- skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
- data, (2 * ETH_ALEN));
+ memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
skb_pull(skb, VLAN_HLEN);
}
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id)
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
- if (fep->work_tx || fep->work_rx) {
+ if ((fep->work_tx || fep->work_rx) && fep->link) {
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
@@ -1967,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *node;
int err = -ENXIO, i;
+ u32 mii_speed, holdtime;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2004,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
- fep->phy_speed--;
- fep->phy_speed <<= 1;
+ mii_speed--;
+ if (mii_speed > 63) {
+ dev_err(&pdev->dev,
+ "fec clock (%lu) to fast to get right mii speed\n",
+ clk_get_rate(fep->clk_ipg));
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /*
+ * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+ * MII_SPEED) register that defines the MDIO output hold time. Earlier
+ * versions are RAZ there, so just ignore the difference and write the
+ * register always.
+ * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+ * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+ * output.
+ * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+ * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+ * holdtime cannot result in a value greater than 3.
+ */
+ holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+ fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
fep->mii_bus = mdiobus_alloc();
@@ -3383,7 +3394,6 @@ fec_drv_remove(struct platform_device *pdev)
regulator_disable(fep->reg_phy);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- fec_enet_clk_enable(ndev, false);
of_node_put(fep->phy_node);
free_netdev(ndev);
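
The MSCR comment in the fec_main.c hunk above packs two computed fields into one register. Below is a minimal user-space sketch of that arithmetic, assuming a 66 MHz ipg clock and ignoring the ENET-MAC decrement quirk; the field positions (speed in bits 6:1, hold time in bits 10:8) follow the patch.

/*
 * Sketch only: mirrors the MSCR math from the fec_main.c hunk above.
 * The 66 MHz clock is an assumed example value.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_ipg = 66000000UL;	/* assumed ipg clock rate */
	unsigned long mii_speed, holdtime, mscr;

	/* Divider chosen so MDC stays at or below 2.5 MHz: ceil(clk / 5 MHz). */
	mii_speed = DIV_ROUND_UP(clk_ipg, 5000000);
	if (mii_speed > 63)
		return 1;			/* 6-bit field would overflow */

	/* Hold MDIO output for at least 10 ns; HOLDTIME stores cycles - 1. */
	holdtime = DIV_ROUND_UP(clk_ipg, 100000000) - 1;

	mscr = mii_speed << 1 | holdtime << 8;
	printf("MSCR = 0x%lx (mii_speed %lu, holdtime %lu)\n",
	       mscr, mii_speed, holdtime);
	return 0;
}
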
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df788..7bf3682 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
+static int gfar_of_group_count(struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ if (!of_node_cmp(child->name, "queue-group"))
+ num++;
+
+ return num;
+}
+
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = of_get_available_child_count(np);
+ unsigned int num_grps = gfar_of_group_count(np);
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
/* Parse and initialize group specific information */
if (priv->mode == MQ_MG_MODE) {
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
+ if (of_node_cmp(child->name, "queue-group"))
+ continue;
+
err = gfar_parse_group(child, priv, model);
if (err)
goto err_grp_init;
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev)
struct phy_device *phydev = priv->phydev;
if (unlikely(phydev->link != priv->oldlink ||
- phydev->duplex != priv->oldduplex ||
- phydev->speed != priv->oldspeed))
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 357e8b57..56b774d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ugeth->phy_interface = phy_interface;
ugeth->max_speed = max_speed;
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(ugeth))
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb..c05e507 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ pr_info("Reboot: freeing all eHEA resources\n");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret = NOTIFY_BAD;
+ struct memory_notify *arg = data;
+
+ mutex_lock(&dlpar_mem_lock);
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+ pr_info("memory offlining canceled");
+ /* Fall through: re-add canceled memory block */
+
+ case MEM_ONLINE:
+ pr_info("memory is going online");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ case MEM_GOING_OFFLINE:
+ pr_info("memory is going offline");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ default:
+ break;
+ }
+
+ ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
+
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+ return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+ int i;
+
+ if (ehea_fw_handles.arr)
+ for (i = 0; i < ehea_fw_handles.num_entries; i++)
+ ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+ ehea_fw_handles.arr[i].fwh,
+ FORCE_FREE);
+
+ if (ehea_bcmc_regs.arr)
+ for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+ ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+ ehea_bcmc_regs.arr[i].port_id,
+ ehea_bcmc_regs.arr[i].reg_type,
+ ehea_bcmc_regs.arr[i].macaddr,
+ 0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+ int ret = 0;
+
+ if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ return 0;
+
+ ret = ehea_create_busmap();
+ if (ret) {
+ pr_info("ehea_create_busmap failed\n");
+ goto out;
+ }
+
+ ret = register_reboot_notifier(&ehea_reboot_nb);
+ if (ret) {
+ pr_info("register_reboot_notifier failed\n");
+ goto out;
+ }
+
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret) {
+ pr_info("register_memory_notifier failed\n");
+ goto out2;
+ }
+
+ ret = crash_shutdown_register(ehea_crash_handler);
+ if (ret) {
+ pr_info("crash_shutdown_register failed\n");
+ goto out3;
+ }
+
+ return 0;
+
+out3:
+ unregister_memory_notifier(&ehea_mem_nb);
+out2:
+ unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+ return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+ if (atomic_read(&ehea_memory_hooks_registered))
+ return;
+
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ if (crash_shutdown_unregister(ehea_crash_handler))
+ pr_info("failed unregistering crash handler\n");
+ unregister_memory_notifier(&ehea_mem_nb);
+}
+
static int ehea_probe_adapter(struct platform_device *dev)
{
struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
int ret;
int i;
+ ret = ehea_register_memory_hooks();
+ if (ret)
+ return ret;
+
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
return 0;
}
-static void ehea_crash_handler(void)
-{
- int i;
-
- if (ehea_fw_handles.arr)
- for (i = 0; i < ehea_fw_handles.num_entries; i++)
- ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
- ehea_fw_handles.arr[i].fwh,
- FORCE_FREE);
-
- if (ehea_bcmc_regs.arr)
- for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
- ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
- ehea_bcmc_regs.arr[i].port_id,
- ehea_bcmc_regs.arr[i].reg_type,
- ehea_bcmc_regs.arr[i].macaddr,
- 0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- int ret = NOTIFY_BAD;
- struct memory_notify *arg = data;
-
- mutex_lock(&dlpar_mem_lock);
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- pr_info("memory offlining canceled");
- /* Readd canceled memory block */
- case MEM_ONLINE:
- pr_info("memory is going online");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- case MEM_GOING_OFFLINE:
- pr_info("memory is going offline");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- default:
- break;
- }
-
- ehea_update_firmware_handles();
- ret = NOTIFY_OK;
-
-out_unlock:
- mutex_unlock(&dlpar_mem_lock);
- return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
- .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
- unsigned long action, void *unused)
-{
- if (action == SYS_RESTART) {
- pr_info("Reboot: freeing all eHEA resources\n");
- ibmebus_unregister_driver(&ehea_driver);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
-};
-
static int check_module_parm(void)
{
int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
if (ret)
goto out;
- ret = ehea_create_busmap();
- if (ret)
- goto out;
-
- ret = register_reboot_notifier(&ehea_reboot_nb);
- if (ret)
- pr_info("failed registering reboot notifier\n");
-
- ret = register_memory_notifier(&ehea_mem_nb);
- if (ret)
- pr_info("failed registering memory remove notifier\n");
-
- ret = crash_shutdown_register(ehea_crash_handler);
- if (ret)
- pr_info("failed registering crash handler\n");
-
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
pr_err("failed registering eHEA device driver on ebus\n");
- goto out2;
+ goto out;
}
ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
if (ret) {
pr_err("failed to register capabilities attribute, ret=%d\n",
ret);
- goto out3;
+ goto out2;
}
return ret;
-out3:
- ibmebus_unregister_driver(&ehea_driver);
out2:
- unregister_memory_notifier(&ehea_mem_nb);
- unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(ehea_crash_handler);
+ ibmebus_unregister_driver(&ehea_driver);
out:
return ret;
}
static void __exit ehea_module_exit(void)
{
- int ret;
-
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
- unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(ehea_crash_handler);
- if (ret)
- pr_info("failed unregistering crash handler\n");
- unregister_memory_notifier(&ehea_mem_nb);
+ ehea_unregister_memory_hooks();
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
ehea_destroy_busmap();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc..cd7675a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
ibmveth_replenish_task(adapter);
if (frames_processed < budget) {
+ napi_complete(napi);
+
/* We think we are done - reenable interrupts,
* then check once more to make sure we are done.
*/
@@ -1144,8 +1146,6 @@ restart_poll:
BUG_ON(lpar_rc != H_SUCCESS);
- napi_complete(napi);
-
if (ibmveth_rxq_pending_buffer(adapter) &&
napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
return ret;
}
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ u64 mac_address;
+ int rc;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+ if (rc) {
+ netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+ return rc;
+ }
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_open = ibmveth_open,
.ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmveth_poll_controller,
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffe..6aea65d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb6..a11c70c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f9..c17ee77 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281b..dadda3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ /* In MFP case we can have a much lower count of MSIx
+ * vectors available and so we need to lower the used
+ * q count.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e..5defe0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
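
The comment block above describes a retry-once flow: if a long multi-write transaction overruns the write semaphore and the AQ reports EBUSY, the semaphore is reacquired and the write is retried exactly once, falling back to the original error if reacquisition fails. A minimal sketch of that control flow, using hypothetical stand-ins (do_write(), reacquire_lock(), write_attempts) rather than the i40e NVM helpers:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins: the first write reports EBUSY, the retry succeeds. */
static int write_attempts;

static int do_write(void)
{
	return (++write_attempts == 1) ? -EBUSY : 0;
}

static int reacquire_lock(void)
{
	return 0;	/* pretend the semaphore was reacquired */
}

static int write_with_one_retry(void)
{
	bool retried = false;
	int err;

retry:
	err = do_write();
	if (err == -EBUSY && !retried) {
		/* Reacquire the resource, then try exactly once more;
		 * if reacquiring fails, report the original error.
		 */
		if (reacquire_lock())
			return err;
		retried = true;
		goto retry;
	}
	return err;
}

int main(void)
{
	printf("result %d after %d attempt(s)\n",
	       write_with_one_retry(), write_attempts);
	return 0;
}
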
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d..bbf1b12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
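
The reworked i40e_get_tx_pending() above derives the number of outstanding descriptors purely from head and tail indices. A small standalone check of that wraparound arithmetic (head and tail are plain ring indices here; in the driver head comes from the write-back area and tail from the tail register):

#include <assert.h>
#include <stdio.h>

/* Mirrors the i40e_get_tx_pending() arithmetic: descriptors between head
 * and tail on a ring of 'count' entries, with wraparound.
 */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head != tail)
		return (head < tail) ? tail - head : tail + count - head;
	return 0;
}

int main(void)
{
	assert(tx_pending(10, 10, 512) == 0);	/* ring drained */
	assert(tx_pending(10, 14, 512) == 4);	/* no wrap */
	assert(tx_pending(510, 2, 512) == 4);	/* tail wrapped past the end */
	printf("ok\n");
	return 0;
}
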
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b0023..dff0bae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 2900438..7088915 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903..c950a03 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 96208f1..2db6532 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2658,16 +2658,11 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvneta_port *pp = netdev_priv(dev);
- int ret;
if (!pp->phy_dev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
- if (!ret)
- mvneta_adjust_link(dev);
-
- return ret;
+ return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a681d7c..546ca42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -724,7 +724,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
* on the host, we deprecate the error message for this
* specific command/input_mod/opcode_mod/fw-status to be debug.
*/
- if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
+ if (op == MLX4_CMD_SET_PORT &&
+ (in_modifier == 1 || in_modifier == 2) &&
op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
@@ -1993,7 +1994,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0;
- mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
break;
case MLX4_COMM_CMD_VHCR1:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2225,6 +2225,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4..3485acf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
- mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
#ifdef CONFIG_MLX4_EN_VXLAN
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
vxlan_get_rx_port(dev);
@@ -2807,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
- err = register_netdev(dev);
- if (err) {
- en_err(priv, "Netdev registration failed for port %d\n", port);
- goto out;
- }
- priv->registered = 1;
-
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
@@ -2853,6 +2844,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
+ mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
+ err = register_netdev(dev);
+ if (err) {
+ en_err(priv, "Netdev registration failed for port %d\n", port);
+ goto out;
+ }
+
+ priv->registered = 1;
+
return 0;
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66..a61009f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
-
+ bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+ priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+
+ if (gro_enabled)
+ priv->dev->features |= NETIF_F_GRO;
+
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 264bc15..6e70ffe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
- for (i = 0; i < dev->num_slaves; i++) {
- if (i != dev->caps.function &&
- master->slave_state[i].active)
- if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to generate event for slave %d\n",
- i);
+ for (i = 0; i <= dev->persist->num_vfs; i++) {
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_slave_state *s_slave =
- &priv->mfunc.master.slave_state[slave];
- if (!s_slave->active) {
- /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ if (slave < 0 || slave > dev->persist->num_vfs ||
+ slave == dev->caps.function ||
+ !priv->mfunc.master.slave_state[slave].active)
return;
- }
slave_event(dev, slave, eqe);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e..ebbe244 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
unsigned long rx_chksum_none;
unsigned long rx_chksum_complete;
unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS 9
+#define NUM_PORT_STATS 10
};
struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553..eda29db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d2..6e413ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
- int port;
+ int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
} else {
struct mlx4_update_qp_params params = {.flags = 0};
- mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ if (err)
+ goto out;
}
}
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
- return 0;
+out:
+ return err;
}
static int mpt_mask(struct mlx4_dev *dev)
@@ -3092,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
if (!priv->mfunc.master.slave_state)
return -EINVAL;
+ /* check for slave valid, slave not PF, and slave active */
+ if (slave < 0 || slave > dev->persist->num_vfs ||
+ slave == dev->caps.function ||
+ !priv->mfunc.master.slave_state[slave].active)
+ return 0;
+
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d..57a6e6c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
if (mac->phydev)
phy_start(mac->phydev);
- init_timer(&mac->tx->clean_timer);
- mac->tx->clean_timer.function = pasemi_mac_tx_timer;
- mac->tx->clean_timer.data = (unsigned long)mac->tx;
- mac->tx->clean_timer.expires = jiffies+HZ;
- add_timer(&mac->tx->clean_timer);
+ setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+ (unsigned long)mac->tx);
+ mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae..0a5e204 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
} __attribute__ ((aligned(64)));
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
struct rcv_desc {
__le16 reference_handle;
__le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
#define NETXEN_IMAGE_START 0x43000 /* compressed image */
#define NETXEN_SECONDARY_START 0x200000 /* backup images */
#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
-#define NETXEN_USER_START 0x3E8000 /* Firmare info */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa43176..f221126 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
#define QLCNIC_BRDCFG_START 0x4000 /* board config */
#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
-#define QLCNIC_USER_START 0x3E8000 /* Firmare info */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020a..c70ab40 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
int rc = -EINVAL;
if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firwmare\n");
+ netif_err(tp, ifup, dev, "invalid firmware\n");
goto out;
}
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
RTL_W8(ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
- netdev_reset_queue(tp->dev);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
- bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+ RTL_W8(TxPoll, NPQ);
- if (!skb->xmit_more || stop_queue ||
- netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
- RTL_W8(TxPoll, NPQ);
-
- mmiowb();
- }
+ mmiowb();
- if (stop_queue) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
unsigned int dirty_tx, tx_left;
- unsigned int bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- pkts_compl++;
- bytes_compl += tx_skb->skb->len;
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
-
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd2..736d5d1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
.tpauser = 1,
.hw_swap = 1,
.rmiimode = 1,
- .shift_rd0 = 1,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
msleep(2); /* max frame time at 10 Mbps < 1250 us */
sh_eth_get_stats(ndev);
sh_eth_reset(ndev);
+
+ /* Set MAC address again */
+ update_mac_address(ndev);
}
/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
+ /* TACT bit must be checked before all the following reads */
+ rmb();
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ /* RACT bit must be checked before all the following reads */
+ rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740, R8A779x, and
- * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * bit 0. However, in case of the R8A7740 and R7S72100
+ * the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
+ wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE) {
+ if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
u32 count = (sh_eth_read(ndev, RDFAR) -
sh_eth_read(ndev, RDLAR)) >> 4;
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- if (skb_padto(skb, ETH_ZLEN))
+ if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
+ wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
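
The rmb()/wmb() calls added to sh_eth.c above enforce the usual descriptor-ownership handshake: read the ACT bit before the other descriptor fields, and publish the fields before handing the descriptor back by setting ACT. A rough user-space analogue of that pattern using C11 acquire/release atomics (the driver itself pairs plain barriers with DMA-coherent descriptor memory; DESC_ACT, sw_take() and sw_give() are illustrative names, not driver symbols):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_ACT	0x80000000u	/* "hardware owns this descriptor" */

struct desc {
	_Atomic uint32_t status;
	uint32_t addr;
	uint32_t len;
};

static int sw_take(struct desc *d, uint32_t *len)
{
	/* Acquire-load of status orders the later field reads after the
	 * ownership check (the rmb() in sh_eth_txfree()/sh_eth_rx()).
	 */
	if (atomic_load_explicit(&d->status, memory_order_acquire) & DESC_ACT)
		return 0;	/* still owned by the hardware side */
	*len = d->len;
	return 1;
}

static void sw_give(struct desc *d, uint32_t addr, uint32_t len)
{
	d->addr = addr;
	d->len = len;
	/* Release-store publishes the fields before ACT becomes visible
	 * (the wmb() before setting RACT/TACT in the patch).
	 */
	atomic_store_explicit(&d->status, DESC_ACT, memory_order_release);
}

int main(void)
{
	struct desc d = { 0 };
	uint32_t len;

	sw_give(&d, 0x1000, 64);
	printf("owned by hw: %d\n", !sw_take(&d, &len));
	return 0;
}
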
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6a..5cecec2 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1 << rocker_port->lport;
+ val |= 1ULL << rocker_port->lport;
else
- val &= ~(1 << rocker_port->lport);
+ val &= ~(1ULL << rocker_port->lport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
err = rocker_probe_port(rocker, i);
if (err)
@@ -4466,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
+ /* There are currently three cases handled here:
+ * 1. Joining a bridge
+ * 2. Leaving a previously joined bridge
+ * 3. Other, e.g. being added to or removed from a bond or openvswitch,
+ * in which case nothing is done
+ */
if (master && master->rtnl_link_ops &&
!strcmp(master->rtnl_link_ops->kind, "bridge"))
err = rocker_port_bridge_join(rocker_port, master);
- else
+ else if (rocker_port_is_bridged(rocker_port))
err = rocker_port_bridge_leave(rocker_port);
return err;
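
The 1ULL change in the rocker hunk above matters because PORT_PHYS_ENABLE is a 64-bit port bitmap and lport can exceed 31, while a plain 1 << lport is evaluated as a 32-bit int. A small demonstration of the difference on typical two's-complement targets (the "wrong" value relies on behaviour that is formally undefined, which is exactly why the patch avoids it):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int port = 31;

	/* 1 << 31 overflows the sign bit of int (undefined behaviour;
	 * typically INT_MIN), which then sign-extends when widened.
	 */
	uint64_t wrong = (uint64_t)(1 << port);	/* 0xffffffff80000000 */
	uint64_t right = 1ULL << port;		/* 0x0000000080000000 */

	printf("wrong = 0x%016" PRIx64 "\nright = 0x%016" PRIx64 "\n",
	       wrong, right);
	return 0;
}
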
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127..3449893 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
smc->packets_waiting = 0;
smc_reset(dev);
- init_timer(&smc->media);
- smc->media.function = media_check;
- smc->media.data = (u_long) dev;
- smc->media.expires = jiffies + HZ;
- add_timer(&smc->media);
+ setup_timer(&smc->media, media_check, (u_long)dev);
+ mod_timer(&smc->media, jiffies + HZ);
return 0;
} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f9..8678e39 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,11 @@ static const char version[] =
#include "smc91x.h"
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/assabet.h>
+#include <mach/neponset.h>
+#endif
+
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
@@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
- struct resource *res;
+ struct resource *res, *ires;
unsigned int __iomem *addr;
unsigned long irq_flags = SMC_IRQ_FLAGS;
- unsigned long irq_resflags;
int ret;
ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- ndev->irq = platform_get_irq(pdev, 0);
- if (ndev->irq <= 0) {
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
ret = -ENODEV;
goto out_release_io;
}
- /*
- * If this platform does not specify any special irqflags, or if
- * the resource supplies a trigger, override the irqflags with
- * the trigger flags from the resource.
- */
- irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
- if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
- irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
+
+ ndev->irq = ires->start;
+
+ if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+ irq_flags = ires->flags & IRQF_TRIGGER_MASK;
ret = smc_request_attrib(pdev, ndev);
if (ret)
goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
- neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+ if (machine_is_assabet() && machine_has_neponset())
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf..3a18501 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
- defined(CONFIG_MACH_MAINSTONE) ||\
- defined(CONFIG_MACH_ZYLONITE) ||\
- defined(CONFIG_MACH_LITTLETON) ||\
- defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2) ||\
- defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
#include <asm/mach-types.h>
@@ -74,95 +67,8 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
- unsigned int v = val << 16;
- v |= readl(ioaddr + (reg & ~2)) & 0xffff;
- writel(v, ioaddr + (reg & ~2));
- } else {
- writew(val, ioaddr + reg);
- }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
- defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_ARCH_PXA_IDP) || \
- defined(CONFIG_ARCH_RAMSES) || \
- defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
- if (reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
-
#elif defined(CONFIG_COLDFIRE)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3..a0ea84f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
- priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
- add_timer(&priv->eee_ctrl_timer);
+ setup_timer(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer,
+ (unsigned long)priv);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(eee_timer));
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846eb..f9b42f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
const char *mac = NULL;
+ int irq, wol_irq, lpi_irq;
+
+ /* Get IRQ information early so that we can ask for a deferred
+ * probe, if needed, before we go too far with resource allocation.
+ */
+ irq = platform_get_irq_byname(pdev, "macirq");
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dev,
+ "MAC IRQ configuration information not found\n");
+ }
+ return irq;
+ }
+
+ /* On some platforms, e.g. SPEAr, the wake up irq differs from the mac
+ * irq. The external wake up irq can be passed through the platform code
+ * named as "eth_wake_irq".
+ *
+ * If the wake up interrupt is not passed from the platform, the driver
+ * will continue to use the mac irq (ndev->irq)
+ */
+ wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (wol_irq < 0) {
+ if (wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ wol_irq = irq;
+ }
+
+ lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return PTR_ERR(priv);
}
+ /* Copy IRQ values to priv structure which is now available */
+ priv->dev->irq = irq;
+ priv->wol_irq = wol_irq;
+ priv->lpi_irq = lpi_irq;
+
/* Get MAC address if available (DT) */
if (mac)
memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
- /* Get the MAC information */
- priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
- if (priv->dev->irq < 0) {
- if (priv->dev->irq != -EPROBE_DEFER) {
- netdev_err(priv->dev,
- "MAC IRQ configuration information not found\n");
- }
- return priv->dev->irq;
- }
-
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq < 0) {
- if (priv->wol_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- priv->wol_irq = priv->dev->irq;
- }
-
- priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (priv->lpi_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f90..0c5842a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
*flow_type = IP_USER_FLOW;
break;
default:
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
TCAM_V4KEY0_CLASS_CODE_SHIFT;
ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
if (ret < 0) {
netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
parent->index);
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d..a1bbaf6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, slave->port_vlan);
+ priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
}
return 0;
}
+#endif
-static const struct dev_pm_ops cpsw_pm_ops = {
- .suspend = cpsw_suspend,
- .resume = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static const struct of_device_id cpsw_of_mtable[] = {
{ .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b4..c00084d 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend_late = davinci_mdio_suspend,
- .resume_early = davinci_mdio_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931..0e0fbb5 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
+ napi_complete(napi);
w5100_write(priv, W5100_IMR, IR_S0);
mmiowb();
- napi_complete(napi);
}
return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9..4b31000 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
+ napi_complete(napi);
w5300_write(priv, W5300_IMR, IR_S0);
mmiowb();
- napi_complete(napi);
}
return rx_count;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f..9e16a28 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
int i;
static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
- if (dev->flags & IFF_ALLMULTI) {
+ if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 924ea98..54549a6 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+ const void *iaddr, bool is_v6);
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2a17500..b7877a1 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hash = (addr->atype == IPVL_IPV6) ?
ipvlan_get_v6_hash(&addr->ip6addr) :
ipvlan_get_v4_hash(&addr->ip4addr);
- hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+ if (hlist_unhashed(&addr->hlnode))
+ hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
{
- hlist_del_rcu(&addr->hlnode);
+ hlist_del_init_rcu(&addr->hlnode);
if (sync)
synchronize_rcu();
}
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+ const void *iaddr, bool is_v6)
{
- struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
(!is_v6 && addr->atype == IPVL_IPV4 &&
addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
- return true;
+ return addr;
}
+ return NULL;
+}
+
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+{
+ struct ipvl_dev *ipvlan;
- if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
- return true;
+ ASSERT_RTNL();
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+ return true;
+ }
return false;
}
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_PAUSE))
return;
- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (local && (ipvlan == in_dev))
continue;
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
mcast_acct:
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
}
+ rcu_read_unlock();
/* Locally generated? ...Forward a copy to the main-device as
* well. On the RX side we'll ignore it (wont give it to any
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4f4099d..4fa1420 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr, !dev->dismantle);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
}
}
list_del_rcu(&ipvlan->pnode);
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
- if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+ if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
addr->master = ipvlan;
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
- list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv6cnt++;
- ipvlan_ht_addr_add(ipvlan, addr);
+ /* If the interface is not up, the address will be added to the hash
+ * list by ipvlan_open.
+ */
+ if (netif_running(ipvlan->dev))
+ ipvlan_ht_addr_add(ipvlan, addr);
return 0;
}
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
- addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+ addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
ipvlan->ipv6cnt--;
WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
- if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+ if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
addr->master = ipvlan;
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
- list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv4cnt++;
- ipvlan_ht_addr_add(ipvlan, addr);
+ /* If the interface is not up, the address will be added to the hash
+ * list by ipvlan_open.
+ */
+ if (netif_running(ipvlan->dev))
+ ipvlan_ht_addr_add(ipvlan, addr);
ipvlan_set_broadcast_mac_filter(ipvlan, true);
return 0;
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
- addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+ addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
ipvlan->ipv4cnt--;
WARN_ON(ipvlan->ipv4cnt < 0);
if (!ipvlan->ipv4cnt)
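The comments added in both the IPv4 and IPv6 add paths describe the same lifecycle: an address always sits on the per-device addrs list, but is inserted into the port-wide hash (and so becomes visible to receive lookups) only while the interface is up; ipvlan_open()/ipvlan_stop() hash and unhash it on state changes. A hedged outline of that add path, reusing the ipvlan private types from the hunks above (drivers/net/ipvlan/ipvlan.h), not verbatim driver code:

#include <linux/list.h>
#include <linux/netdevice.h>

/* struct ipvl_dev, struct ipvl_addr and ipvlan_ht_addr_add() come from the
 * ipvlan driver itself; this only sketches the flow the comments describe.
 */
static int demo_ipvlan_track_addr(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	list_add_tail(&addr->anode, &ipvlan->addrs);	/* always tracked per device */

	/* Hash only while the netdev is up; an address added while the
	 * device is down gets hashed later by ipvlan_open().
	 */
	if (netif_running(ipvlan->dev))
		ipvlan_ht_addr_add(ipvlan, addr);
	return 0;
}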
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index e40fdfc..27ecc5c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
} /* else everything is zero */
}
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
struct iov_iter *from, int noblock)
{
- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
struct sk_buff *skb;
struct macvlan_dev *vlan;
unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
}
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
linear, noblock, &err);
if (!skb)
goto err;
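MACVTAP_RESERVE exists because the neighbour code assumes the hard header can be pushed at an HH_DATA_MOD-aligned offset, so the skb headroom is derived from HH_DATA_OFF(ETH_HLEN) instead of NET_IP_ALIGN. A small worked example of that arithmetic, using userspace mirrors of the kernel macros (the values assume HH_DATA_MOD is 16 and ETH_HLEN is 14, as in mainline of that era):

#include <stdio.h>

/* Userspace mirrors for illustration only; the real macros live in
 * include/linux/netdevice.h and include/uapi/linux/if_ether.h.
 */
#define DEMO_HH_DATA_MOD	16
#define DEMO_HH_DATA_OFF(len)	\
	(DEMO_HH_DATA_MOD - ((((len) - 1) & (DEMO_HH_DATA_MOD - 1)) + 1))
#define DEMO_ETH_HLEN		14

int main(void)
{
	/* 16 - (((14 - 1) & 15) + 1) = 16 - 14 = 2 bytes of reserved headroom */
	printf("MACVTAP_RESERVE would be %d\n", DEMO_HH_DATA_OFF(DEMO_ETH_HLEN));
	return 0;
}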
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54..32efbd4 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
#define XGBE_PHY_SPEEDS 3
#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_10000_BLWC 0
#define SPEED_10000_CDR 0x7
#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x1e
+#define SPEED_10000_PQ 0x12
#define SPEED_10000_RATE 0x0
#define SPEED_10000_TXAMP 0xa
#define SPEED_10000_WORD 0x7
+#define SPEED_10000_DFE_TAP_CONFIG 0x1
+#define SPEED_10000_DFE_TAP_ENABLE 0x7f
#define SPEED_2500_BLWC 1
#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_2500_RATE 0x1
#define SPEED_2500_TXAMP 0xf
#define SPEED_2500_WORD 0x1
+#define SPEED_2500_DFE_TAP_CONFIG 0x3
+#define SPEED_2500_DFE_TAP_ENABLE 0x0
#define SPEED_1000_BLWC 1
#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_1000_RATE 0x3
#define SPEED_1000_TXAMP 0xf
#define SPEED_1000_WORD 0x1
+#define SPEED_1000_DFE_TAP_CONFIG 0x3
+#define SPEED_1000_DFE_TAP_ENABLE 0x0
/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
#define RXTX_REG20_BLWC_ENA_INDEX 2
#define RXTX_REG20_BLWC_ENA_WIDTH 1
#define RXTX_REG114_PQ_REG_INDEX 9
#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
SPEED_10000_TXAMP,
};
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+ SPEED_1000_DFE_TAP_CONFIG,
+ SPEED_2500_DFE_TAP_CONFIG,
+ SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+ SPEED_1000_DFE_TAP_ENABLE,
+ SPEED_2500_DFE_TAP_ENABLE,
+ SPEED_10000_DFE_TAP_ENABLE,
+};
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
/* Auto-negotiation state machine support */
struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
status = XSIR0_IOREAD(priv, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- return;
+ goto rx_reset;
}
netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
sizeof(priv->serdes_tx_amp));
}
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_CFG_PROPERTY,
+ priv->serdes_dfe_tap_cfg,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_CFG_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_cfg,
+ amd_xgbe_phy_serdes_dfe_tap_cfg,
+ sizeof(priv->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_ENA_PROPERTY,
+ priv->serdes_dfe_tap_ena,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_ENA_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_ena,
+ amd_xgbe_phy_serdes_dfe_tap_ena,
+ sizeof(priv->serdes_dfe_tap_ena));
+ }
+
phydev->priv = priv;
if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6a..52cd8db 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
}
/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ * speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+ unsigned int idx;
+
+ idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+ return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+ (settings[idx].setting & features);
+}
+
+/**
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
* @phydev: the target phy_device struct
*
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
int status;
- unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- idx = phy_find_setting(phydev->speed, phydev->duplex);
- if (!(lp & adv & settings[idx].setting))
+ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
goto eee_exit_err;
if (clk_stop_enable) {
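phy_check_valid() collapses the old phy_find_setting() plus settings[] indexing into one predicate, which is what the EEE path above now relies on: EEE is allowed only if the currently negotiated speed/duplex pair is covered by the intersection of local and link-partner advertisements. A hedged sketch of a caller, as it could appear inside phy.c where the static helper is visible:

/* Sketch only: gate an EEE-style feature on the negotiated link mode. */
static bool demo_eee_link_ok(struct phy_device *phydev, u32 adv, u32 lp)
{
	return phy_check_valid(phydev->speed, phydev->duplex, lp & adv);
}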
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274..7d39484 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
- struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
- return team_port_exists(dev) ? port : NULL;
+ return rcu_dereference(dev->rx_handler_data);
}
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
@@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list)
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
team->ops.port_change_dev_addr(team, port);
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
return 0;
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3bd9678..7ba8d08 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
* Linksys USB200M
* Netgear FA120
* Sitecom LN-029
+ * Sitecom LN-028
* Intellinet USB 2.0 Ethernet
* ST Lab USB 2.0 Ethernet
* TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 5c55f11..75d6f26 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
skb_put(skb, sizeof(padbytes));
}
+
+ usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792..1173a24 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0df6, 0x0056),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+ USB_DEVICE (0x0df6, 0x061c),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9311a08..4545e78 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
+#define LENOVO_VENDOR_ID 0x17ef
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 80a844e..c3e4da9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
- dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
/* keep private stats: framing overhead and number of NTBs */
ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
ctx->tx_ntbs++;
- /* usbnet has already counted all the framing overhead.
+ /* usbnet will count all the framing overhead by default.
* Adjust the stats so that the tx_bytes counter show real
* payload data instead.
*/
- dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+ usbnet_set_skb_tx_stats(skb_out, n,
+ ctx->tx_curr_frame_payload - skb_out->len);
return skb_out;
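The comment rewrite above reflects the new accounting split: instead of adjusting dev->net->stats directly, cdc_ncm records a per-skb packet count and a byte delta (payload minus the full NTB length) and lets the usbnet core apply both when the URB completes; see the usbnet.c hunk further down. An illustration of the call with made-up numbers:

/* Hypothetical NTB: n = 3 datagrams, 4096 payload bytes, 4192 bytes on the
 * wire.  The -96 delta cancels the framing overhead that usbnet would add
 * to tx_bytes, and the 3 is credited to tx_packets on completion.
 */
usbnet_set_skb_tx_stats(skb_out, 3, 4096 - 4192);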
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708..1762ad3 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
};
#define CMD_PACKET_SIZE 64
-/* first command after power on can take around 8 seconds */
-#define CMD_TIMEOUT 15000
+#define CMD_TIMEOUT 100
#define CMD_REPLY_RETRY 5
#define CX82310_MTU 1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev, "send command %#x: error %d\n",
- cmd, ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+ cmd, ret);
goto end;
}
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
buf, CMD_PACKET_SIZE, &actual_len,
CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev,
- "reply receive error %d\n", ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev,
+ "reply receive error %d\n",
+ ret);
goto end;
}
if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
char buf[15];
struct usb_device *udev = dev->udev;
+ u8 link[3];
+ int timeout = 50;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data)
return -ENOMEM;
+ /* wait for firmware to become ready (indicated by the link being up) */
+ while (--timeout) {
+ ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
+ link, sizeof(link));
+ /* the command can time out during boot - it's not an error */
+ if (!ret && link[0] == 1 && link[2] == 1)
+ break;
+ msleep(500);
+ }
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+ return -ETIMEDOUT;
+ }
+
/* enable ethernet mode (?) */
ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
if (ret) {
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = {
.tx_fixup = cx82310_tx_fixup,
};
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_DEV_INFO, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bDeviceClass = (cl), \
+ .bDeviceSubClass = (sc), \
+ .bDeviceProtocol = (pr)
+
static const struct usb_device_id products[] = {
{
- USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+ USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
.driver_info = (unsigned long) &cx82310_info
},
{ },
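The cx82310 bind path above replaces a single 15-second command timeout with an explicit readiness loop: poll CMD_GET_LINK_STATUS with a short per-command timeout, tolerate failures while the firmware boots, and give up after a bounded number of attempts. That poll-until-ready-or-timeout shape as a hedged sketch, with demo_query_ready() standing in for the link-status command:

#include <linux/delay.h>
#include <linux/errno.h>

int demo_query_ready(void *dev);	/* placeholder: returns 0 once the device answers */

static int demo_wait_ready(void *dev)
{
	int timeout = 50;		/* ~25 s total at 500 ms per attempt */

	while (--timeout) {
		if (!demo_query_ready(dev))
			return 0;	/* firmware is up */
		msleep(500);		/* boot-time failures are expected; retry */
	}
	return -ETIMEDOUT;
}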
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9cdfb3f..778e915 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
}
cprev = cnow;
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tiocmget->waitq, &wait);
return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0..1bfe0fc 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
}, {
USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
.driver_info = (unsigned long) &prolific_info,
+}, {
+ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
+ * Host-to-Host Cable
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 438fc6b..9f7c0ab 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -492,6 +492,7 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{}
};
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index b94a0fb..953de13 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb_put(skb, sizeof(padbytes));
}
+ usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 449835f..777757a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
- dev->net->stats.tx_packets++;
+ dev->net->stats.tx_packets += entry->packets;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
} else
urb->transfer_flags |= URB_ZERO_PACKET;
}
- entry->length = urb->transfer_buffer_length = length;
+ urb->transfer_buffer_length = length;
+
+ if (info->flags & FLAG_MULTI_PACKET) {
+ /* Driver has set number of packets and a length delta.
+ * Calculate the complete length and ensure that it's
+ * positive.
+ */
+ entry->length += length;
+ if (WARN_ON_ONCE(entry->length <= 0))
+ entry->length = length;
+ } else {
+ usbnet_set_skb_tx_stats(skb, 1, length);
+ }
spin_lock_irqsave(&dev->txq.lock, flags);
retval = usb_autopm_get_interface_async(dev->intf);
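The usbnet.c hunks complete that scheme: a FLAG_MULTI_PACKET driver stores a packet count and a (possibly negative) byte delta in the skb's cb area, usbnet_start_xmit() adds the final URB length to the delta, and tx_complete() credits entry->packets and entry->length to the netdev stats. The helper itself is not part of this diff; a plausible shape for it, offered only as a hedged sketch, is:

#include <linux/skbuff.h>
#include <linux/usb/usbnet.h>

/* Sketch of the helper used by the driver hunks above; the real definition
 * belongs in include/linux/usb/usbnet.h and may differ in detail.
 */
static inline void demo_set_skb_tx_stats(struct sk_buff *skb,
					 unsigned long packets,
					 long bytes_delta)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;

	entry->packets = packets;	/* added to tx_packets in tx_complete() */
	entry->length = bytes_delta;	/* usbnet_start_xmit() adds the URB length */
}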
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff366..59b0e97 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
{
int i;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ napi_hash_del(&vi->rq[i].napi);
netif_napi_del(&vi->rq[i].napi);
+ }
kfree(vi->rq);
kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
+ for (i = 0; i < vi->max_queue_pairs; i++)
napi_disable(&vi->rq[i].napi);
- napi_hash_del(&vi->rq[i].napi);
- netif_napi_del(&vi->rq[i].napi);
- }
}
remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775..f8528a4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
flags &= ~VXLAN_HF_RCO;
- vni &= VXLAN_VID_MASK;
+ vni &= VXLAN_VNI_MASK;
}
/* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
flags &= ~VXLAN_GBP_USED_BITS;
}
- if (flags || (vni & ~VXLAN_VID_MASK)) {
+ if (flags || vni & ~VXLAN_VNI_MASK) {
/* If there are any unprocessed flags remaining treat
* this as a malformed packet. This behavior diverges from
* VXLAN RFC (RFC7348) which stipulates that bits in reserved
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2..88d121d 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->rxwaitq, &wait);
while (!chan->rx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->rx_status == 0) {
chan->rx_status = 1;
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
spin_unlock_irqrestore(&cosa->lock, flags);
mutex_unlock(&chan->rlock);
return -ERESTARTSYS;
}
}
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
kbuf = chan->rxdata;
count = chan->rxsize;
spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->txwaitq, &wait);
while (!chan->tx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->tx_status == 0) {
chan->tx_status = 1;
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
}
}
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
up(&chan->wsem);
spin_unlock_irqrestore(&cosa->lock, flags);
kfree(kbuf);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index cb366ad..f50a6bc 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_buf *bf = avp->av_bcbuf;
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
avp->av_bslot);
tasklet_disable(&sc->bcon_tasklet);
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
if (bf && bf->bf_mpdu) {
struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
}
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
- if ((vif->type != NL80211_IFTYPE_AP) ||
- (sc->nbcnvifs > 1)) {
+ if (vif->type != NL80211_IFTYPE_AP) {
ath_dbg(common, CONFIG,
"An AP interface is already present !\n");
return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
* enabling/disabling SWBA.
*/
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (!bss_conf->enable_beacon &&
- (sc->nbcnvifs <= 1)) {
- cur_conf->enable_beacon = false;
- } else if (bss_conf->enable_beacon) {
- cur_conf->enable_beacon = true;
- ath9k_cache_beacon_config(sc, ctx, bss_conf);
+ bool enabled = cur_conf->enable_beacon;
+
+ if (!bss_conf->enable_beacon) {
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+ } else {
+ cur_conf->enable_beacon |= BIT(avp->av_bslot);
+ if (!enabled)
+ ath9k_cache_beacon_config(sc, ctx, bss_conf);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 2b79a56..d237373 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -54,7 +54,7 @@ struct ath_beacon_config {
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
- bool enable_beacon;
+ u8 enable_beacon;
bool ibss_creator;
u32 nexttbtt;
u32 intval;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 60aa8d7..8529014 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
- ah->tpc_enabled = true;
+ ah->tpc_enabled = false;
ah->ani_function = ATH9K_ANI_ALL;
if (!AR_SREV_9300_20_OR_LATER(ah))
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05..75345c1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
case 0x432a: /* BCM4321 */
case 0x432d: /* BCM4322 */
case 0x4352: /* BCM43222 */
+ case 0x435a: /* BCM43228 */
case 0x4333: /* BCM4331 */
case 0x43a2: /* BCM4360 */
case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index defb7a4..7748a1c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
- brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+ if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+ brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
/* set chip related quirks */
switch (drvr->bus_if->chip) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf70..8eff275 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
void *dcmd_buf = NULL, *wr_pointer;
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
- brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
- cmdhdr->len);
+ if (len < sizeof(*cmdhdr)) {
+ brcmf_err("vendor command too short: %d\n", len);
+ return -EINVAL;
+ }
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
ifp = vif->ifp;
- len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+ brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+ if (cmdhdr->offset > len) {
+ brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+ return -EINVAL;
+ }
+
+ len -= cmdhdr->offset;
ret_len = cmdhdr->len;
if (ret_len > 0 || len > 0) {
if (len > BRCMF_DCMD_MAXLEN) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a6f22c3..3811878 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -708,7 +708,6 @@ struct iwl_priv {
unsigned long reload_jiffies;
int reload_count;
bool ucode_loaded;
- bool init_ucode_run; /* Don't run init uCode again */
u8 plcp_delta_threshold;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 47e64e8..cceb026 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
- if (vif)
- scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
-
- IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
- if (iwlagn_txfifo_flush(priv, scd_queues)) {
- IWL_ERR(priv, "flush request fail\n");
- goto done;
+ if (drop) {
+ IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+ scd_queues);
+ if (iwlagn_txfifo_flush(priv, scd_queues)) {
+ IWL_ERR(priv, "flush request fail\n");
+ goto done;
+ }
}
+
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+ iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 4dbef7e..5244e43 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
return 0;
- if (priv->init_ucode_run)
- return 0;
-
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
calib_complete, ARRAY_SIZE(calib_complete),
iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
*/
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
- if (!ret)
- priv->init_ucode_run = true;
goto out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fa..06f6cc0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
+ .led_mode = IWL_LED_BLINK, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true
+ .rx_with_siso_diversity = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d08..890b95f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
const struct iwl_cfg iwl2000_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true
+ .rx_with_siso_diversity = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true
+ .rx_with_siso_diversity = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbed..724194e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
+ .led_mode = IWL_LED_BLINK, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
+ .internal_wimax_coex = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8..21b2630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
+ .led_mode = IWL_LED_BLINK, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
+ .internal_wimax_coex = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
+ .internal_wimax_coex = true, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 996e7f1..c7154ac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
+ kfree(pieces);
return;
try_again:
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55..7810c41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+ if (mvmvif->phy_ctxt &&
+ IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
mvmvif->phy_ctxt->id))
smps_mode = IEEE80211_SMPS_AUTOMATIC;
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3..542ee74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+ if (mvmvif->phy_ctxt &&
+ data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
smps_mode = IEEE80211_SMPS_AUTOMATIC;
IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec0..09654e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+ if ((mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ (mvm->fw->ucode_capa.api[0] &
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS))
hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- iwl_mvm_cancel_scan(mvm);
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a hw_scan when it's already stopped. This can
+ * happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_scan_completed() and the userspace called
+ * cancel scan before ieee80211_scan_work() could run.
+ * To handle that, simply return if the scan is not running.
+ */
+ /* FIXME: for now, we ignore this race for UMAC scans, since
+ * they don't set the scan_status.
+ */
+ if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
+ (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_cancel_scan(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a sched_scan when it's already stopped. This
+ * can happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_sched_scan_stopped() and the userspace called
+ * stop sched scan before ieee80211_sched_scan_stopped_work()
+ * could run. To handle this, simply return if the scan is
+ * not running.
+ */
+ /* FIXME: for now, we ignore this race for UMAC scans, since
+ * they don't set the scan_status.
+ */
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+ !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ mutex_unlock(&mvm->mutex);
+ return 0;
+ }
+
ret = iwl_mvm_scan_offload_stop(mvm, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
-
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
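The mac80211.c hunks above add an early return when mac80211 asks to cancel a scan that has already completed on the driver side. A minimal sketch of that check-under-lock guard, with hypothetical names (not the iwlwifi API):

#include <pthread.h>
#include <stdio.h>

enum scan_status { SCAN_NONE, SCAN_RUNNING };

struct scanner {
	pthread_mutex_t lock;
	enum scan_status status;
};

/* Cancel a scan only if one is actually running.  A cancel request that
 * races with our own completion (status already SCAN_NONE) is not an
 * error, so it simply returns, mirroring the guard added above. */
static void scanner_cancel(struct scanner *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->status != SCAN_RUNNING) {
		pthread_mutex_unlock(&s->lock);
		return;
	}
	/* ... send the stop command to the device here ... */
	s->status = SCAN_NONE;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct scanner s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.status = SCAN_NONE,
	};

	scanner_cancel(&s);	/* already stopped: returns without error */
	printf("status=%d\n", s.status);
	return 0;
}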
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f..078f24c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@ enum rs_column_mode {
#define MAX_NEXT_COLUMNS 7
#define MAX_COLUMN_CHECKS 3
+struct rs_tx_column;
+
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl);
+ struct iwl_scale_tbl_info *tbl,
+ const struct rs_tx_column *next_col);
struct rs_tx_column {
enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
};
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl)
+ struct iwl_scale_tbl_info *tbl,
+ const struct rs_tx_column *next_col)
{
- return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+ return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl)
+ struct iwl_scale_tbl_info *tbl,
+ const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl)
+ struct iwl_scale_tbl_info *tbl,
+ const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl)
+ struct iwl_scale_tbl_info *tbl,
+ const struct rs_tx_column *next_col)
{
struct rs_rate *rate = &tbl->rate;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1271,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -1590,7 +1600,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
allow_func = next_col->checks[j];
- if (allow_func && !allow_func(mvm, sta, tbl))
+ if (allow_func && !allow_func(mvm, sta, tbl, next_col))
break;
}
@@ -2504,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
+ if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+ /* if vif isn't initialized, mvm doesn't know about
+ * this station, so don't do anything with it
+ */
+ sta = NULL;
+ mvm_sta = NULL;
+ }
+
/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
/* Treat uninitialized rate scaling data same as non-existing. */
@@ -2820,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
(struct iwl_op_mode *)mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
/* Stop any ongoing aggregations as rs starts off assuming no agg */
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
ieee80211_stop_tx_ba_session(sta, tid);
@@ -3580,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
-static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
{
- struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+
+ if (!mvmsta->vif)
+ return;
debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);
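The rs.c hunks above extend the per-column allow callbacks so each check receives the candidate column (next_col) instead of guessing it from the current rate table. A self-contained sketch of that callback-table pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CHECKS 3

struct tx_column;	/* forward declaration, as in the rs.c hunk */

/* Every check sees both some current state and the candidate column
 * under consideration, mirroring the extra next_col argument. */
typedef bool (*allow_fn)(int current_ant, const struct tx_column *next);

struct tx_column {
	int ant;
	allow_fn checks[MAX_CHECKS];
};

static bool ant_available(int current_ant, const struct tx_column *next)
{
	/* Decide from the candidate column's antenna, not the current one. */
	return next->ant != 0;
}

static bool column_allowed(int current_ant, const struct tx_column *next)
{
	for (int i = 0; i < MAX_CHECKS; i++) {
		if (next->checks[i] && !next->checks[i](current_ant, next))
			return false;
	}
	return true;
}

int main(void)
{
	const struct tx_column col = { .ant = 1, .checks = { ant_available } };

	printf("allowed: %d\n", column_allowed(2, &col));
	return 0;
}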
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3c..c47c8051 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0;
- if (iwl_mvm_is_radio_killed(mvm))
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ret = 0;
goto out;
+ }
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
sched ? "offloaded " : "", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
- return ret;
+ goto out;
}
IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
sched ? "offloaded " : "");
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
- if (ret)
- return ret;
-
+out:
/*
* Clear the scan status so the next scan requests will succeed. This
* also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1177,7 +1176,7 @@ out:
ieee80211_scan_completed(mvm->hw, true);
}
- return 0;
+ return ret;
}
static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
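The scan.c hunk above folds the early returns into a single out label so the scan status is always cleared and the error code is propagated rather than swallowed. A minimal sketch of that single-exit cleanup shape, with hypothetical helpers:

#include <stdio.h>

enum { SCAN_NONE, SCAN_RUNNING };

static int send_stop_cmd(void) { return 0; }	/* placeholders */
static int wait_for_done(void) { return -1; }	/* pretend the wait times out */

static int scan_stop(int *status)
{
	int ret;

	if (*status == SCAN_NONE)
		return 0;

	ret = send_stop_cmd();
	if (ret)
		goto out;

	ret = wait_for_done();
	/* fall through: even on timeout the status must be reset */
out:
	/* Always clear the status so the next scan request can run,
	 * and return the error code instead of discarding it. */
	*status = SCAN_NONE;
	return ret;
}

int main(void)
{
	int status = SCAN_RUNNING;

	printf("ret=%d status=%d\n", scan_stop(&status), status);
	return 0;
}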
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf..4b81c0b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
struct iwl_time_event_notif *notif)
{
if (!le32_to_cpu(notif->status)) {
+ if (te_data->vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_connection_loss(te_data->vif);
IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
iwl_mvm_te_clear_data(mvm, te_data);
return;
@@ -750,8 +752,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
* request
*/
list_for_each_entry(te_data, &mvm->time_event_list, list) {
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
- te_data->running) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
is_p2p = true;
goto remove_te;
@@ -766,10 +767,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
* request
*/
list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
- if (te_data->running) {
- mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- goto remove_te;
- }
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ goto remove_te;
}
remove_te:
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 07304e1..96a0540 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
- if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
- tid_data->txq_id, tid, scd_flow)) {
+ if (tid_data->txq_id != scd_flow) {
+ IWL_ERR(mvm,
+ "invalid BA notification: Q %d, tid %d, flow %d\n",
+ tid_data->txq_id, tid, scd_flow);
rcu_read_unlock();
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dbd6bcf..686dd30 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a4c658..8908be6 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- genlmsg_unicast(&init_net, skb, dst_portid);
+ if (genlmsg_unicast(&init_net, skb, dst_portid))
+ goto err_free_txskb;
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
return;
nla_put_failure:
+ nlmsg_free(skb);
+err_free_txskb:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
ieee80211_free_txskb(hw, my_skb);
data->tx_failed++;
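The mac80211_hwsim hunk above adds a second error label so a failed unicast frees only the tx skb, while the nla_put_failure path also frees the half-built netlink message. A generic, self-contained sketch of that layered goto unwind, where each label releases what was set up before it (hypothetical buffers, not the netlink API):

#include <stdio.h>
#include <stdlib.h>

static int do_work(int send_fails)
{
	char *msg, *frame;

	msg = malloc(64);	/* stands in for the netlink message */
	if (!msg)
		goto err;

	frame = malloc(64);	/* stands in for the tx frame */
	if (!frame)
		goto err_free_msg;

	if (send_fails)
		goto err_free_frame;

	/* success: pretend both buffers were handed off and consumed */
	free(frame);
	free(msg);
	return 0;

err_free_frame:
	free(frame);
err_free_msg:
	free(msg);
err:
	fprintf(stderr, "error occurred in %s\n", __func__);
	return -1;
}

int main(void)
{
	return do_work(1) ? 1 : 0;
}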
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d46774..074f716 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
return true;
- } else if (0x86DD == ether_type) {
- return true;
+ } else if (ETH_P_IPV6 == ether_type) {
+ /* TODO: Handle any IPv6 cases that need special handling.
+ * For now, always return false
+ */
+ goto end;
}
end:
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a62170e..8c45cf4 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
/*This is for new trx flow*/
struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
u8 temp_one = 1;
+ u8 *entry;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
- if (pskb)
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+ if (pskb) {
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+ pskb->len, PCI_DMA_TODEVICE);
kfree_skb(pskb);
+ }
/*NB: the beacon data buffer must be 32-bit aligned. */
pskb = ieee80211_beacon_get(hw, mac->vif);
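The rtlwifi pci.c hunk above unmaps the stale beacon's DMA buffer before the skb is freed. The actual call (pci_unmap_single) cannot run outside a kernel module, so the sketch below only illustrates the ordering with hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical packet with an associated "mapping": the point is the
 * ordering, i.e. tear down the mapping that references the buffer
 * before the buffer itself is freed. */
struct packet {
	void *data;
	int   mapped;
};

static void unmap_packet(struct packet *p)
{
	if (p->mapped) {
		p->mapped = 0;
		printf("mapping released\n");
	}
}

static void free_packet(struct packet *p)
{
	free(p->data);
	free(p);
}

int main(void)
{
	struct packet *p = malloc(sizeof(*p));

	p->data = malloc(256);
	p->mapped = 1;

	unmap_packet(p);	/* first: drop the mapping (cf. pci_unmap_single) */
	free_packet(p);		/* then: free the buffer (cf. kfree_skb) */
	return 0;
}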
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227a..3aa8648 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
unsigned int num_queues = vif->num_queues;
int i;
unsigned int queue_index;
- struct xenvif_stats *vif_stats;
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
- vif_stats = &vif->queues[queue_index].stats;
+ void *vif_stats = &vif->queues[queue_index].stats;
accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
}
data[i] = accum;
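The interface.c hunk above makes vif_stats a void pointer so the byte offset in xenvif_stats[i].offset is applied directly; arithmetic on the typed struct pointer would step in multiples of the struct size. A self-contained sketch of offset-based stats accumulation, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct queue_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

struct stat_desc {
	const char *name;
	size_t offset;		/* byte offset into struct queue_stats */
};

static const struct stat_desc descs[] = {
	{ "rx_packets", offsetof(struct queue_stats, rx_packets) },
	{ "tx_packets", offsetof(struct queue_stats, tx_packets) },
};

int main(void)
{
	struct queue_stats queues[2] = {
		{ .rx_packets = 1, .tx_packets = 2 },
		{ .rx_packets = 3, .tx_packets = 4 },
	};

	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		unsigned long accum = 0;

		for (size_t q = 0; q < 2; q++) {
			/* char * (or void * with the GNU extension)
			 * keeps the arithmetic in bytes */
			char *base = (char *)&queues[q];

			accum += *(unsigned long *)(base + descs[i].offset);
		}
		printf("%s = %lu\n", descs[i].name, accum);
	}
	return 0;
}

Keeping the per-field offsets in a table, as the driver does, means one accumulation loop serves every counter without a switch statement per statistic.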
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2..997cf09 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st);
+static void push_tx_responses(struct xenvif_queue *queue);
static inline int tx_work_todo(struct xenvif_queue *queue);
@@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
do {
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
@@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
- int i;
+ int i, f;
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
@@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
frags[i].page_offset = 0;
skb_frag_size_set(&frags[i], len);
}
- /* swap out with old one */
- memcpy(skb_shinfo(skb)->frags,
- frags,
- i * sizeof(skb_frag_t));
- skb_shinfo(skb)->nr_frags = i;
- skb->truesize += i * PAGE_SIZE;
- /* remove traces of mapped pages and frag_list */
+ /* Copied all the bits from the frag list -- free it. */
skb_frag_list_init(skb);
+ xenvif_skb_zerocopy_prepare(queue, nskb);
+ kfree_skb(nskb);
+
+ /* Release all the original (foreign) frags. */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
uarg->callback(uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
+ /* Fill the skb with the new (local) frags. */
+ memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
return 0;
}
@@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ push_tx_responses(queue);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
}
@@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+ int notify;
+
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
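The netback.c hunks above split response creation (make_tx_response) from the ring push and peer notification (push_tx_responses), so callers queue one or more responses under response_lock and then flush them explicitly. A generic sketch of that produce-then-flush split, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	int pending;		/* responses written but not yet pushed */
};

/* Write one response into the ring; the caller holds ring->lock. */
static void make_response(struct ring *r, int status)
{
	r->pending++;
	printf("queued response, status %d\n", status);
}

/* Publish everything queued so far and notify the peer once. */
static void push_responses(struct ring *r)
{
	if (r->pending) {
		printf("notify peer (%d responses)\n", r->pending);
		r->pending = 0;
	}
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = 0 };

	pthread_mutex_lock(&r.lock);
	make_response(&r, 0);
	make_response(&r, -1);
	push_responses(&r);	/* one notification covers both responses */
	pthread_mutex_unlock(&r.lock);
	return 0;
}

Separating the two steps keeps the notify decision in one place and lets a batch of responses produced under the same lock trigger a single interrupt to the peer.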
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e9b960f..720aaf6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1008,8 +1008,7 @@ err:
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
- int max = xennet_can_sg(dev) ?
- XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+ int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->ethtool_ops = &xennet_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
-
np->netdev = netdev;
netif_carrier_off(netdev);