Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 47
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 175
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 54
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 246
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 24
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 9
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 18
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 9
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
42 files changed, 817 insertions, 555 deletions
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05..ec6eac1 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
link->open++;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ax_open(dev);
} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d..2777289 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ei_open(dev);
} /* pcnet_open */
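
Both 8390 hunks above make the same conversion: the five open-coded timer lines collapse into setup_timer() plus mod_timer(). A condensed sketch of the resulting pattern (kernel API of this era, not buildable standalone; my_watchdog and info are stand-in names, not from the patch):

/* setup_timer() fills .function and .data in one call; mod_timer() both
 * sets .expires and (re)arms the timer, replacing the separate expires
 * assignment + add_timer() pair.
 */
static void my_watchdog(u_long data)
{
	struct net_device *dev = (struct net_device *)data;
	/* ... poll link state, then rearm with mod_timer() ... */
}

	/* in the device-open path: */
	setup_timer(&info->watchdog, my_watchdog, (u_long)dev);
	mod_timer(&info->watchdog, jiffies + HZ);	/* first fire in one second */
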
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c..6725dc0 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+ (count < limit)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
int rxcomplete = 0;
- int txcomplete = 0;
unsigned long int flags;
- txcomplete = tse_tx_complete(priv);
+ tse_tx_complete(priv);
rxcomplete = tse_rx(priv, budget);
- if (rxcomplete >= budget || txcomplete > 0)
- return rxcomplete;
+ if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
- netdev_dbg(priv->dev,
- "NAPI Complete, did %d packets with budget %d\n",
- txcomplete+rxcomplete, budget);
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ rxcomplete, budget);
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
- priv->dmaops->enable_rxirq(priv);
- priv->dmaops->enable_txirq(priv);
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- return rxcomplete + txcomplete;
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ }
+ return rxcomplete;
}
/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct altera_tse_private *priv;
- unsigned long int flags;
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
}
priv = netdev_priv(dev);
- /* turn off desc irqs and enable napi rx */
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ spin_lock(&priv->rxdma_irq_lock);
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
if (likely(napi_schedule_prep(&priv->napi))) {
+ spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
- /* reset IRQs */
- priv->dmaops->clear_rxirq(priv);
- priv->dmaops->clear_txirq(priv);
-
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
return IRQ_HANDLED;
}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &priv->rx_fifo_depth)) {
+ &priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
goto err_free_netdev;
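
The tse_poll() rework above brings the driver in line with the NAPI contract: tse_rx() is now bounded by the budget (the new count < limit test), and completion plus interrupt re-enable happen only on a sub-budget pass. A condensed kernel-style sketch of that shape, with hypothetical example_* names (not buildable standalone):

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done;

	example_tx_complete(priv);		/* tx cleanup is not counted against budget */
	done = example_rx(priv, budget);	/* must never exceed budget */

	if (done < budget) {			/* quiet: stop polling, unmask irqs */
		napi_gro_flush(napi, false);
		__napi_complete(napi);
		example_enable_irqs(priv);	/* under the irq lock if the ISR races */
	}
	return done;				/* returning == budget keeps us scheduled */
}
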
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d440..885b02b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
}
}
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
return -EINVAL;
}
- phy_stop(pdata->phydev);
-
spin_lock_irqsave(&pdata->lock, flags);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 0);
- /* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
phy_start(pdata->phydev);
- /* Enable Tx/Rx */
+ xgbe_napi_enable(pdata, 0);
+
hw_if->powerup_tx(pdata);
hw_if->powerup_rx(pdata);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_attach(netdev);
- xgbe_napi_enable(pdata, 0);
netif_tx_start_all_queues(netdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct net_device *netdev = pdata->netdev;
+ int ret;
DBGPR("-->xgbe_start\n");
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
phy_start(pdata->phydev);
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
xgbe_init_tx_timers(pdata);
- xgbe_napi_enable(pdata, 1);
netif_tx_start_all_queues(netdev);
DBGPR("<--xgbe_start\n");
return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
- phy_stop(pdata->phydev);
-
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int i;
-
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->dev_irq);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- synchronize_irq(channel->dma_irq);
- }
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
xgbe_start(pdata);
DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel = NULL;
- unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- /* Request interrupts */
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
- goto err_rings;
- }
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev,
- "error requesting irq %d\n",
- channel->dma_irq);
- goto err_irq;
- }
- }
- }
-
ret = xgbe_start(pdata);
if (ret)
- goto err_start;
+ goto err_rings;
DBGPR("<--xgbe_open\n");
return 0;
-err_start:
- hw_if->exit(pdata);
-
-err_irq:
- if (pdata->per_channel_irq) {
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
err_rings:
desc_if->free_ring_resources(pdata);
@@ -1399,30 +1424,16 @@ err_phy_init:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- unsigned int i;
DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupts */
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
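
The unwind loop in xgbe_request_irqs() above ("'i' will go to UINT_MAX and exit") leans on defined unsigned wraparound: decrementing an unsigned index below zero wraps, so the single test i < channel_count doubles as the termination condition while walking backwards. A standalone demonstration with made-up channel counts:

#include <stdio.h>

int main(void)
{
	unsigned int count = 5;
	unsigned int failed_at = 3;	/* hypothetical: request for channel 3 failed */
	unsigned int i;

	/* Walk back over channels 2, 1, 0; after freeing channel 0, i wraps
	 * to UINT_MAX, which is >= count, and the loop exits. If channel 0
	 * itself failed, i starts at UINT_MAX and the loop never runs. */
	for (i = failed_at - 1; i < count; i--)
		printf("freeing irq of channel %u\n", i);

	return 0;
}
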
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4..783543a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
- STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
s = &bcm_sysport_gstrings_stats[i];
switch (s->type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_SOFT:
continue;
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417..7e3d87a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_SOFT,
};
/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
#define STAT_RXCHK(str, m, ofs) { \
.stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46b..6befde6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
+ BCMGENET_STAT_SOFT,
};
struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
#define STAT_GENET_MISC(str, m, offset) { \
.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
- STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_SOFT:
continue;
case BCMGENET_STAT_MIB_RX:
case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
}
/* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int pkts_compl = 0;
unsigned int bds_compl;
unsigned int c_index;
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = ring->cbs + last_c_index;
bds_compl = 0;
if (tx_cb_ptr->skb) {
+ pkts_compl++;
bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
last_c_index &= (num_tx_bds - 1);
}
- if (ring->free_bds > (MAX_SKB_FRAGS + 1))
- ring->int_disable(priv, ring);
-
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+ }
ring->c_index = c_index;
+
+ return pkts_compl;
}
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ unsigned int released;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
- __bcmgenet_tx_reclaim(dev, ring);
+ released = __bcmgenet_tx_reclaim(dev, ring);
spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_tx_ring *ring =
+ container_of(napi, struct bcmgenet_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+ if (work_done == 0) {
+ napi_complete(napi);
+ ring->int_enable(ring->priv, ring);
+
+ return 0;
+ }
+
+ return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
- if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- ring->int_enable(priv, ring);
- }
out:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
struct device *kdev = &priv->pdev->dev;
int ret;
u32 reg, cpu_mask_clear;
+ int index;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ for (index = 0; index < priv->hw_params->tx_queues; index++)
+ bcmgenet_intrl2_1_writel(priv, (1 << index),
+ INTRL2_CPU_MASK_CLEAR);
+
/* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
unsigned int first_bd;
spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
ring->index = index;
if (index == DESC_INDEX) {
ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
+
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
return ret;
}
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
kfree(priv->tx_cbs);
}
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_fini_tx_ring(priv, i);
+
+ __bcmgenet_fini_dma(priv);
+}
+
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
GFP_KERNEL);
if (!priv->tx_cbs) {
- bcmgenet_fini_dma(priv);
+ __bcmgenet_fini_dma(priv);
return -ENOMEM;
}
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
struct bcmgenet_priv, napi);
unsigned int work_done;
- /* tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
work_done = bcmgenet_desc_rx(priv, budget);
/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
+ struct bcmgenet_tx_ring *ring;
unsigned int index;
/* Save irq status for bottom-half processing. */
priv->irq1_stat =
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
- ~priv->int1_mask;
+ ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
/* Check the MBDONE interrupts.
* packet is done, reclaim descriptors
*/
- if (priv->irq1_stat & 0x0000ffff) {
- index = 0;
- for (index = 0; index < 16; index++) {
- if (priv->irq1_stat & (1 << index))
- bcmgenet_tx_reclaim(priv->dev,
- &priv->tx_rings[index]);
+ for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ if (!(priv->irq1_stat & BIT(index)))
+ continue;
+
+ ring = &priv->tx_rings[index];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
}
}
+
return IRQ_HANDLED;
}
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
if (priv->irq0_stat &
(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
- /* Tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
+ }
}
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
UMAC_IRQ_PHY_DET_F |
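
The heart of this bcmgenet change is per-TX-ring NAPI: each ring carries its own napi_struct, and both ISRs now use the same mask-then-schedule handshake, while bcmgenet_tx_poll() re-enables the ring interrupt only once a reclaim pass completes no packets. A condensed kernel-style sketch of the handshake (hypothetical example_* names, not buildable standalone):

static irqreturn_t example_tx_isr(struct example_priv *priv, unsigned int q)
{
	struct example_tx_ring *ring = &priv->tx_rings[q];

	/* napi_schedule_prep() fails if this ring's poll is already pending,
	 * so the interrupt gets masked exactly once per scheduling cycle. */
	if (likely(napi_schedule_prep(&ring->napi))) {
		ring->int_disable(priv, ring);
		__napi_schedule(&ring->napi);	/* later runs the tx poll */
	}
	return IRQ_HANDLED;
}
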
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec..0d370d1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
+ struct napi_struct napi; /* NAPI per tx queue */
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
struct bcmgenet_tx_ring *);
void (*int_disable)(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *);
+ struct bcmgenet_priv *priv;
};
/* device context */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a84..c308429 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
}
static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
- int addr_len)
+ u8 v6)
{
- return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
- ipv6_clip_hash(ctbl, addr);
+ return v6 ? ipv6_clip_hash(ctbl, addr) :
+ ipv4_clip_hash(ctbl, addr);
}
static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
- int ret = 0;
+ int ret = -1;
if (!ctbl)
return 0;
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
-
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
spin_lock_init(&ce->lock);
atomic_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
- ce->addr_len = addr_len;
- memcpy(ce->addr, lip, addr_len);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
+ ce->addr6.sin6_family = AF_INET6;
+ memcpy(ce->addr6.sin6_addr.s6_addr,
+ lip, sizeof(struct in6_addr));
ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
if (ret) {
write_unlock_bh(&ctbl->lock);
return ret;
}
+ } else {
+ ce->addr.sin_family = AF_INET;
+ memcpy((char *)(&ce->addr.sin_addr), lip,
+ sizeof(struct in_addr));
}
} else {
write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
-
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
+ int ret = -1;
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
for (i = 0 ; i < ctbl->clipt_size; ++i) {
list_for_each_entry(ce, &ctbl->hash_list[i], list) {
ip[0] = '\0';
- if (ce->addr_len == 16)
- sprintf(ip, "%pI6c", ce->addr);
- else
- sprintf(ip, "%pI4c", ce->addr);
+ sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip,
atomic_read(&ce->refcnt));
}
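
clip_entry now stores its key as a sockaddr_in/sockaddr_in6 union, which is what lets clip_tbl_show() print either family with a single "%pISc". The lookups rely on sin_family and sin6_family sharing storage at the start of the union. A standalone user-space illustration of that property (the address is made up):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

union clip_key {
	struct sockaddr_in addr;	/* .sin_family tags an IPv4 entry */
	struct sockaddr_in6 addr6;	/* .sin6_family tags an IPv6 entry */
};

int main(void)
{
	union clip_key ce;
	char buf[INET6_ADDRSTRLEN];

	memset(&ce, 0, sizeof(ce));
	ce.addr6.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &ce.addr6.sin6_addr);

	/* sin_family and sin6_family occupy the same bytes, so either
	 * member can be used to test which family is stored. */
	if (ce.addr.sin_family == AF_INET6)
		inet_ntop(AF_INET6, &ce.addr6.sin6_addr, buf, sizeof(buf));
	else
		inet_ntop(AF_INET, &ce.addr.sin_addr, buf, sizeof(buf));

	printf("%s\n", buf);
	return 0;
}
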
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba01..35eb43c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */
atomic_t refcnt;
struct list_head list;
- u32 addr[4];
- int addr_len;
+ union {
+ struct sockaddr_in addr;
+ struct sockaddr_in6 addr6;
+ };
};
struct clip_tbl {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17..97842d0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
- __be32 *buf, int dir);
+ void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
u32 len, __be32 *buf)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b6..853c389 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
- * @buf: host memory buffer
+ * @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- u32 len, __be32 *buf, int dir)
+ u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
/* Argument sanity checks ...
*/
- if (addr & 0x3)
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
+ buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzel." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
- *buf++ = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+ mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
- (__force u32) *buf++);
+ (__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
*/
if (resid) {
union {
- __be32 word;
+ u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
- last.word = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ last.word = le32_to_cpu(
+ (__force __le32)t4_read_reg(adap,
+ mem_base + offset));
for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
bp[i] = last.byte[i];
} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
- (__force u32) last.word);
+ (__force u32)cpu_to_le32(last.word));
}
}
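
The byte-order note added to t4_memory_rw() is easy to verify in user space: the memory-window read hands back adapter bytes b0..b3 as the 32-bit value 0xb3b2b1b0, i.e. a little-endian encoding, so a raw store of that value is only correct on little-endian hosts; that is why the transfer loop now goes through le32_to_cpu()/cpu_to_le32(). A standalone demonstration with made-up byte values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t adapter_mem[4] = { 0xb0, 0xb1, 0xb2, 0xb3 };

	/* What the swizzled register read returns: a little-endian
	 * interpretation of the adapter bytes, as a host u32. */
	uint32_t reg = (uint32_t)adapter_mem[3] << 24 |
		       (uint32_t)adapter_mem[2] << 16 |
		       (uint32_t)adapter_mem[1] << 8 |
		       (uint32_t)adapter_mem[0];
	uint8_t host_buf[4];

	memcpy(host_buf, &reg, 4);	/* the pre-patch raw store */
	printf("raw store: %02x %02x %02x %02x\n",
	       host_buf[0], host_buf[1], host_buf[2], host_buf[3]);
	/* On a little-endian host this prints b0 b1 b2 b3 and matches the
	 * adapter; on a big-endian host it prints b3 b2 b1 b0 -- the bug
	 * that le32_to_cpu() on the read path undoes. */
	return 0;
}
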
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038..a5179bf 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, notify_intr)) {
- vnic_intr_return_all_credits(&enic->intr[notify_intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
}
if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
struct enic *enic = data;
unsigned int intr = enic_msix_notify_intr(enic);
- vnic_intr_return_all_credits(&enic->intr[intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[intr]);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df788..178e540 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
struct phy_device *phydev = priv->phydev;
if (unlikely(phydev->link != priv->oldlink ||
- phydev->duplex != priv->oldduplex ||
- phydev->speed != priv->oldspeed))
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb..c05e507 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ pr_info("Reboot: freeing all eHEA resources\n");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret = NOTIFY_BAD;
+ struct memory_notify *arg = data;
+
+ mutex_lock(&dlpar_mem_lock);
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+ pr_info("memory offlining canceled");
+ /* Fall through: re-add canceled memory block */
+
+ case MEM_ONLINE:
+ pr_info("memory is going online");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ case MEM_GOING_OFFLINE:
+ pr_info("memory is going offline");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ default:
+ break;
+ }
+
+ ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
+
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+ return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+ int i;
+
+ if (ehea_fw_handles.arr)
+ for (i = 0; i < ehea_fw_handles.num_entries; i++)
+ ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+ ehea_fw_handles.arr[i].fwh,
+ FORCE_FREE);
+
+ if (ehea_bcmc_regs.arr)
+ for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+ ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+ ehea_bcmc_regs.arr[i].port_id,
+ ehea_bcmc_regs.arr[i].reg_type,
+ ehea_bcmc_regs.arr[i].macaddr,
+ 0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+ int ret = 0;
+
+ if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ return 0;
+
+ ret = ehea_create_busmap();
+ if (ret) {
+ pr_info("ehea_create_busmap failed\n");
+ goto out;
+ }
+
+ ret = register_reboot_notifier(&ehea_reboot_nb);
+ if (ret) {
+ pr_info("register_reboot_notifier failed\n");
+ goto out;
+ }
+
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret) {
+ pr_info("register_memory_notifier failed\n");
+ goto out2;
+ }
+
+ ret = crash_shutdown_register(ehea_crash_handler);
+ if (ret) {
+ pr_info("crash_shutdown_register failed\n");
+ goto out3;
+ }
+
+ return 0;
+
+out3:
+ unregister_memory_notifier(&ehea_mem_nb);
+out2:
+ unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+ return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+ if (atomic_read(&ehea_memory_hooks_registered))
+ return;
+
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ if (crash_shutdown_unregister(ehea_crash_handler))
+ pr_info("failed unregistering crash handler\n");
+ unregister_memory_notifier(&ehea_mem_nb);
+}
+
static int ehea_probe_adapter(struct platform_device *dev)
{
struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
int ret;
int i;
+ ret = ehea_register_memory_hooks();
+ if (ret)
+ return ret;
+
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
return 0;
}
-static void ehea_crash_handler(void)
-{
- int i;
-
- if (ehea_fw_handles.arr)
- for (i = 0; i < ehea_fw_handles.num_entries; i++)
- ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
- ehea_fw_handles.arr[i].fwh,
- FORCE_FREE);
-
- if (ehea_bcmc_regs.arr)
- for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
- ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
- ehea_bcmc_regs.arr[i].port_id,
- ehea_bcmc_regs.arr[i].reg_type,
- ehea_bcmc_regs.arr[i].macaddr,
- 0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- int ret = NOTIFY_BAD;
- struct memory_notify *arg = data;
-
- mutex_lock(&dlpar_mem_lock);
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- pr_info("memory offlining canceled");
- /* Readd canceled memory block */
- case MEM_ONLINE:
- pr_info("memory is going online");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- case MEM_GOING_OFFLINE:
- pr_info("memory is going offline");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- default:
- break;
- }
-
- ehea_update_firmware_handles();
- ret = NOTIFY_OK;
-
-out_unlock:
- mutex_unlock(&dlpar_mem_lock);
- return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
- .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
- unsigned long action, void *unused)
-{
- if (action == SYS_RESTART) {
- pr_info("Reboot: freeing all eHEA resources\n");
- ibmebus_unregister_driver(&ehea_driver);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
-};
-
static int check_module_parm(void)
{
int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
if (ret)
goto out;
- ret = ehea_create_busmap();
- if (ret)
- goto out;
-
- ret = register_reboot_notifier(&ehea_reboot_nb);
- if (ret)
- pr_info("failed registering reboot notifier\n");
-
- ret = register_memory_notifier(&ehea_mem_nb);
- if (ret)
- pr_info("failed registering memory remove notifier\n");
-
- ret = crash_shutdown_register(ehea_crash_handler);
- if (ret)
- pr_info("failed registering crash handler\n");
-
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
pr_err("failed registering eHEA device driver on ebus\n");
- goto out2;
+ goto out;
}
ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
if (ret) {
pr_err("failed to register capabilities attribute, ret=%d\n",
ret);
- goto out3;
+ goto out2;
}
return ret;
-out3:
- ibmebus_unregister_driver(&ehea_driver);
out2:
- unregister_memory_notifier(&ehea_mem_nb);
- unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(ehea_crash_handler);
+ ibmebus_unregister_driver(&ehea_driver);
out:
return ret;
}
static void __exit ehea_module_exit(void)
{
- int ret;
-
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
- unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(ehea_crash_handler);
- if (ret)
- pr_info("failed unregistering crash handler\n");
- unregister_memory_notifier(&ehea_mem_nb);
+ ehea_unregister_memory_hooks();
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
ehea_destroy_busmap();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc..072426a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
return ret;
}
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ u64 mac_address;
+ int rc;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+ if (rc) {
+ netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+ return rc;
+ }
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_open = ibmveth_open,
.ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmveth_poll_controller,
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffe..6aea65d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb6..a11c70c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
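
The i40e_common.c and i40e_dcb_nl.c hunks above fix the same C pitfall: `>>` binds tighter than `&`, so the unparenthesized `val & MASK >> SHIFT` masks with an already-shifted mask instead of shifting the masked field. A standalone demonstration (mask, shift, and register values are made up):

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x0000ff00u
#define FIELD_SHIFT 8

int main(void)
{
	uint32_t reg = 0x0000ab00;

	uint32_t wrong = reg & FIELD_MASK >> FIELD_SHIFT;	/* reg & 0xff */
	uint32_t right = (reg & FIELD_MASK) >> FIELD_SHIFT;	/* the field */

	printf("wrong=0x%x right=0x%x\n", wrong, right);	/* 0x0 vs 0xab */
	return 0;
}
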
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f9..c17ee77 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281b..dadda3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ /* In MFP case we can have a much lower count of MSIx
+ * vectors available and so we need to lower the used
+ * q count.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e..5defe0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
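
The comment block above explains the one-shot retry: on an EBUSY write status the code reacquires the NVM semaphore and jumps back to the switch, with retry_attempt guaranteeing at most one extra pass. The control-flow skeleton, reduced to standalone C (do_write() and the EBUSY_RC value stand in for the firmware call and I40E_AQ_RC_EBUSY):

#include <stdbool.h>
#include <stdio.h>

#define EBUSY_RC 16	/* stand-in error code */

static int do_write(int attempt)
{
	return attempt == 0 ? -EBUSY_RC : 0;	/* first try busy, second succeeds */
}

int main(void)
{
	bool retry_attempt = false;
	int attempt = 0;
	int status;

retry:
	status = do_write(attempt++);
	if (status == -EBUSY_RC && !retry_attempt) {
		/* reacquire the resource here, then take exactly one more pass */
		retry_attempt = true;
		goto retry;
	}
	printf("final status = %d\n", status);	/* 0 */
	return 0;
}
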
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d..bbf1b12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
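
Two pieces of the i40e_txrx.c change are worth restating: i40e_get_head() works because the head write-back slot lives one descriptor past the end of the ring (desc + count), and the new i40e_get_tx_pending() derives the outstanding-descriptor count from that head and the tail register, handling wraparound. The arithmetic, checked standalone (ring size and indices are made up):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the reworked i40e_get_tx_pending() computation. */
static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head != tail)
		return (head < tail) ? tail - head : tail + count - head;
	return 0;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 14, 512));	/* 4: no wrap */
	printf("%u\n", tx_pending(510, 2, 512));	/* 4: tail wrapped past end */
	printf("%u\n", tx_pending(7, 7, 512));		/* 0: ring idle */
	return 0;
}
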
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b0023..dff0bae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
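
i40e_chk_linearize()'s first test uses the I40E_MAX_BUFFER_TXD limit defined above: if even the average number of fragments per TSO segment exceeds the 8 buffers the HW can scatter-gather, the skb must be linearized before the per-segment walk is attempted. A standalone check of that fast path (fragment counts are made up):

#include <stdio.h>

#define I40E_MAX_BUFFER_TXD 8	/* from i40e_txrx.h above */

/* DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > 8, open-coded. */
static int needs_linearize_fast(unsigned int num_frags, unsigned int gso_segs)
{
	return (num_frags + gso_segs + gso_segs - 1) / gso_segs >
	       I40E_MAX_BUFFER_TXD;
}

int main(void)
{
	printf("%d\n", needs_linearize_fast(17, 2));	/* ~10 per segment -> 1 */
	printf("%d\n", needs_linearize_fast(6, 2));	/* ~4 per segment  -> 0 */
	return 0;
}
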
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 2900438..7088915 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: the HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire, so we need to spot the cases where the
+ * skb must be linearized before transmit.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* cheap average check first: too many frags per segment? */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
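
Both the PF and VF drivers now size the Tx backlog from the head write-back location rather than the software next_to_clean/next_to_use pair: pending work is simply the ring distance from the hardware head to the software tail. A standalone sketch of that distance arithmetic follows; the ring size and index values are made-up samples.

#include <stdio.h>

/* head: last descriptor the HW has completed (from write-back)
 * tail: next descriptor slot SW will post
 * Pending work is the modular distance from head to tail. */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int ring_count)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + ring_count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 42, 512));	/* 32, no wrap */
	printf("%u\n", tx_pending(510, 4, 512));	/* 6, wrapped */
	printf("%u\n", tx_pending(7, 7, 512));		/* 0, idle ring */
	return 0;
}
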
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903..c950a03 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66..a61009f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
-
+ bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+ priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+
+ if (gro_enabled)
+ priv->dev->features |= NETIF_F_GRO;
+
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
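
The selftest fix above is a plain save/disable/restore of one feature bit, so a looped-back frame cannot be coalesced by GRO before the validator sees it. The shape of that pattern, reduced to a runnable toy; the flag name and test body here are stand-ins, not the mlx4 code.

#include <stdbool.h>
#include <stdio.h>

#define F_GRO	(1u << 0)			/* stand-in feature bit */

static unsigned int features = F_GRO;

static void run_loopback_test(void)
{
	bool gro_enabled = features & F_GRO;	/* remember current state */

	features &= ~F_GRO;			/* GRO off for the test */
	/* ... transmit and poll for the looped-back frame here ... */
	if (gro_enabled)			/* restore only if it was on */
		features |= F_GRO;
}

int main(void)
{
	run_loopback_test();
	printf("GRO after test: %s\n", features & F_GRO ? "on" : "off");
	return 0;
}
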
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553..eda29db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d2..d97ca88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
- int port;
+ int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
} else {
struct mlx4_update_qp_params params = {.flags = 0};
- mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ if (err)
+ goto out;
}
}
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
- return 0;
+out:
+ return err;
}
static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d..57a6e6c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
if (mac->phydev)
phy_start(mac->phydev);
- init_timer(&mac->tx->clean_timer);
- mac->tx->clean_timer.function = pasemi_mac_tx_timer;
- mac->tx->clean_timer.data = (unsigned long)mac->tx;
- mac->tx->clean_timer.expires = jiffies+HZ;
- add_timer(&mac->tx->clean_timer);
+ setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+ (unsigned long)mac->tx);
+ mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
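
The same mechanical conversion appears in several drivers in this merge (axnet_cs, pcnet_cs, smc91c92_cs, pasemi, stmmac): the open-coded init_timer() sequence collapses into setup_timer() plus mod_timer(). A sketch of the before/after against the pre-4.15 timer API; my_timer and my_watchdog are placeholders, and this only builds inside a kernel tree of that era.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_watchdog(unsigned long data)
{
	/* ... poke the hardware, maybe re-arm ... */
}

static void arm_old_style(void)
{
	init_timer(&my_timer);			/* four steps plus add_timer() */
	my_timer.function = my_watchdog;
	my_timer.data = 0UL;			/* cookie passed to the callback */
	my_timer.expires = jiffies + HZ;
	add_timer(&my_timer);
}

static void arm_new_style(void)
{
	setup_timer(&my_timer, my_watchdog, 0UL);	/* one call */
	mod_timer(&my_timer, jiffies + HZ);	/* arms or re-arms safely */
}
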
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae..0a5e204 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
} __attribute__ ((aligned(64)));
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
struct rcv_desc {
__le16 reference_handle;
__le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
#define NETXEN_IMAGE_START 0x43000 /* compressed image */
#define NETXEN_SECONDARY_START 0x200000 /* backup images */
#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
-#define NETXEN_USER_START 0x3E8000 /* Firmare info */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa43176..f221126 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
#define QLCNIC_BRDCFG_START 0x4000 /* board config */
#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
-#define QLCNIC_USER_START 0x3E8000 /* Firmare info */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020a..c70ab40 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
int rc = -EINVAL;
if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firwmare\n");
+ netif_err(tp, ifup, dev, "invalid firmware\n");
goto out;
}
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
RTL_W8(ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
- netdev_reset_queue(tp->dev);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
- bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+ RTL_W8(TxPoll, NPQ);
- if (!skb->xmit_more || stop_queue ||
- netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
- RTL_W8(TxPoll, NPQ);
-
- mmiowb();
- }
+ mmiowb();
- if (stop_queue) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
unsigned int dirty_tx, tx_left;
- unsigned int bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- pkts_compl++;
- bytes_compl += tx_skb->skb->len;
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
-
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
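
With the byte-queue-limits accounting gone, r8169 now bumps its 64-bit counters per completed packet, inside a u64_stats_update_begin()/end() pair so 32-bit readers never observe a torn packets/bytes pair. A sketch of the writer and reader sides of that pattern; the struct and function names are invented, and this is kernel-only API.

#include <linux/u64_stats_sync.h>

struct my_tx_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* seqcount on 32-bit, no-op on 64-bit */
};

static void tx_complete_one(struct my_tx_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);	/* open the writer window */
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void tx_stats_read(struct my_tx_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {			/* retry if a writer raced with us */
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
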
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd2..736d5d1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
.tpauser = 1,
.hw_swap = 1,
.rmiimode = 1,
- .shift_rd0 = 1,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
msleep(2); /* max frame time at 10 Mbps < 1250 us */
sh_eth_get_stats(ndev);
sh_eth_reset(ndev);
+
+ /* Set MAC address again */
+ update_mac_address(ndev);
}
/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
+ /* TACT bit must be checked before all the following reads */
+ rmb();
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ /* RACT bit must be checked before all the following reads */
+ rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740, R8A779x, and
- * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * bit 0. However, in case of the R8A7740 and R7S72100
+ * the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
+ wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE) {
+ if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
u32 count = (sh_eth_read(ndev, RDFAR) -
sh_eth_read(ndev, RDLAR)) >> 4;
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- if (skb_padto(skb, ETH_ZLEN))
+ if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
+ wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
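
The sh_eth barriers implement the usual descriptor-ownership protocol: check the ACT bit, then rmb() before trusting the other descriptor fields; on the transmit and refill side, fill every field, wmb(), and only then flip ownership to the hardware. In pseudo-kernel C, with an invented descriptor layout rather than the real EDMAC one:

struct my_desc {			/* DMA descriptor shared with the MAC */
	u32 status;			/* OWN bit plus completion flags */
	u32 len;
	u32 addr;
};
#define DESC_OWN	0x80000000	/* set: hardware owns the descriptor */

static bool reap_one(struct my_desc *d)
{
	if (d->status & DESC_OWN)
		return false;		/* HW still owns it, stop here */
	rmb();				/* order OWN check before field reads */
	/* ... len, addr and status flags are now stable to read ... */
	return true;
}

static void post_one(struct my_desc *d, u32 addr, u32 len)
{
	d->addr = addr;
	d->len = len;
	wmb();				/* publish the fields first */
	d->status |= DESC_OWN;		/* ownership flip comes last */
}
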
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6a..9fb6948 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1 << rocker_port->lport;
+ val |= 1ULL << rocker_port->lport;
else
- val &= ~(1 << rocker_port->lport);
+ val &= ~(1ULL << rocker_port->lport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
err = rocker_probe_port(rocker, i);
if (err)
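
The rocker one-liner matters because 1 << lport is an int-width shift: for lport of 32 or more the behaviour is undefined, and the resulting mask is silently truncated when widened to u64. Writing 1ULL forces the shift to happen in 64 bits. A runnable demonstration of the corrected mask handling:

#include <stdint.h>
#include <stdio.h>

static uint64_t port_enable(uint64_t val, unsigned int lport, int enable)
{
	/* 1ULL << lport is a 64-bit shift; plain 1 << lport would be
	 * undefined once lport reaches 32 */
	if (enable)
		val |= 1ULL << lport;
	else
		val &= ~(1ULL << lport);
	return val;
}

int main(void)
{
	uint64_t v = 0;

	v = port_enable(v, 40, 1);	/* a port index past bit 31 */
	printf("%#llx\n", (unsigned long long)v);	/* 0x10000000000 */
	v = port_enable(v, 40, 0);
	printf("%#llx\n", (unsigned long long)v);	/* 0 */
	return 0;
}
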
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127..3449893 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
smc->packets_waiting = 0;
smc_reset(dev);
- init_timer(&smc->media);
- smc->media.function = media_check;
- smc->media.data = (u_long) dev;
- smc->media.expires = jiffies + HZ;
- add_timer(&smc->media);
+ setup_timer(&smc->media, media_check, (u_long)dev);
+ mod_timer(&smc->media, jiffies + HZ);
return 0;
} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f9..209ee1b 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
#include "smc91x.h"
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = smc_request_attrib(pdev, ndev);
if (ret)
goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
- neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+ if (machine_is_assabet() && machine_has_neponset())
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf..3a18501 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
- defined(CONFIG_MACH_MAINSTONE) ||\
- defined(CONFIG_MACH_ZYLONITE) ||\
- defined(CONFIG_MACH_LITTLETON) ||\
- defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2) ||\
- defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
#include <asm/mach-types.h>
@@ -74,95 +67,8 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
- unsigned int v = val << 16;
- v |= readl(ioaddr + (reg & ~2)) & 0xffff;
- writel(v, ioaddr + (reg & ~2));
- } else {
- writew(val, ioaddr + reg);
- }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
- defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_ARCH_PXA_IDP) || \
- defined(CONFIG_ARCH_RAMSES) || \
- defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
- if (reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
-
#elif defined(CONFIG_COLDFIRE)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3..a0ea84f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
- priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
- add_timer(&priv->eee_ctrl_timer);
+ setup_timer(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer,
+ (unsigned long)priv);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(eee_timer));
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f90..0c5842a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
*flow_type = IP_USER_FLOW;
break;
default:
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
TCAM_V4KEY0_CLASS_CODE_SHIFT;
ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
if (ret < 0) {
netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
parent->index);
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d..a1bbaf6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, slave->port_vlan);
+ priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
}
return 0;
}
+#endif
-static const struct dev_pm_ops cpsw_pm_ops = {
- .suspend = cpsw_suspend,
- .resume = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static const struct of_device_id cpsw_of_mtable[] = {
{ .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b4..c00084d 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend_late = davinci_mdio_suspend,
- .resume_early = davinci_mdio_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
#if IS_ENABLED(CONFIG_OF)
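
cpsw and davinci_mdio adopt the same idiom: the callbacks go under #ifdef CONFIG_PM_SLEEP, and a helper macro fills in dev_pm_ops so the table and callbacks compile away when sleep support is configured out. The skeleton of that idiom, with placeholder names; this is kernel-only code.

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
	/* ... quiesce the hardware ... */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* ... bring the hardware back up ... */
	return 0;
}
#endif

/* SIMPLE_DEV_PM_OPS wires the pair into system sleep; with
 * CONFIG_PM_SLEEP off it expands to an empty table, so no #ifdef is
 * needed around the struct itself. davinci_mdio uses the
 * SET_LATE_SYSTEM_SLEEP_PM_OPS variant for late/early ordering. */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
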
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f..9e16a28 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
int i;
static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
- if (dev->flags & IFF_ALLMULTI) {
+ if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);