author     David S. Miller <davem@davemloft.net>    2011-05-13 03:01:55 (GMT)
committer  David S. Miller <davem@davemloft.net>    2011-05-13 03:01:55 (GMT)
commit     5c5095494fb545f53b80cbb7539679a10a3472a6 (patch)
tree       d7c40cd66a58030ddef369bcb9acd8d95e2ac864 /drivers
parent     4d586b823acc46c55c889ae1798de236c9d403da (diff)
parent     def57687e9579b7a797681990dff763c411f5347 (diff)
download   linux-fsl-qoriq-5c5095494fb545f53b80cbb7539679a10a3472a6.tar.xz
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-next-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/bonding/bond_main.c    157
-rw-r--r--  drivers/net/tg3.c                   69
-rw-r--r--  drivers/net/usb/cdc_ncm.c           13
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c    3
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h    4
5 files changed, 146 insertions, 100 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6312db1..088fd84 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -344,32 +344,6 @@ out:
}
/**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
- struct slave *slave;
- int i;
-
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
- pr_debug("found VLAN challenged slave - %s\n",
- slave->dev->name);
- return 1;
- }
- }
-
- pr_debug("no VLAN challenged slaves found\n");
- return 0;
-}
-
-/**
* bond_next_vlan - safely skip to the next item in the vlans list.
* @bond: the bond we're working on
* @curr: item we're advancing from
@@ -1406,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-#define BOND_VLAN_FEATURES \
- (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
- NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
+static u32 bond_fix_features(struct net_device *dev, u32 features)
{
struct slave *slave;
- struct net_device *bond_dev = bond->dev;
- u32 features = bond_dev->features;
- u32 vlan_features = 0;
- unsigned short max_hard_header_len = max((u16)ETH_HLEN,
- bond_dev->hard_header_len);
+ struct bonding *bond = netdev_priv(dev);
+ u32 mask;
int i;
- features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
- features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
+ read_lock(&bond->lock);
- if (!bond->first_slave)
- goto done;
+ if (!bond->first_slave) {
+ /* Disable adding VLANs to empty bond. But why? --mq */
+ features |= NETIF_F_VLAN_CHALLENGED;
+ goto out;
+ }
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
- vlan_features = bond->first_slave->dev->vlan_features;
bond_for_each_slave(bond, slave, i) {
features = netdev_increment_features(features,
slave->dev->features,
- NETIF_F_ONE_FOR_ALL);
+ mask);
+ }
+
+out:
+ read_unlock(&bond->lock);
+ return features;
+}
+
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
+ NETIF_F_SOFT_FEATURES | \
+ NETIF_F_LRO)
+
+static void bond_compute_features(struct bonding *bond)
+{
+ struct slave *slave;
+ struct net_device *bond_dev = bond->dev;
+ u32 vlan_features = BOND_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+ int i;
+
+ read_lock(&bond->lock);
+
+ if (!bond->first_slave)
+ goto done;
+
+ bond_for_each_slave(bond, slave, i) {
vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features,
- NETIF_F_ONE_FOR_ALL);
+ slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
done:
- features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(bond_dev, features);
- bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+ bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
- return 0;
+ read_unlock(&bond->lock);
+
+ netdev_change_features(bond_dev);
}
static void bond_setup_by_slave(struct net_device *bond_dev,
@@ -1544,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
- int old_features = bond_dev->features;
int res = 0;
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1577,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
bond_dev->name, slave_dev->name,
slave_dev->name, bond_dev->name);
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
- if (bond->slave_cnt == 0) {
- /* First slave, and it is not VLAN challenged,
- * so remove the block of adding VLANs over the bond.
- */
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
- }
}
/*
@@ -1775,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- bond_compute_features(bond);
-
write_unlock_bh(&bond->lock);
+ bond_compute_features(bond);
+
read_lock(&bond->lock);
new_slave->last_arp_rx = jiffies;
@@ -1958,7 +1940,7 @@ err_free:
kfree(new_slave);
err_undo_flags:
- bond_dev->features = old_features;
+ bond_compute_features(bond);
return res;
}
@@ -1979,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
+ u32 old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2039,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* release the slave from its bond */
bond_detach_slave(bond, slave);
- bond_compute_features(bond);
-
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
@@ -2084,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
bond_dev->name);
}
- } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
- !bond_has_challenged_slaves(bond)) {
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
- bond_dev->name, slave_dev->name, bond_dev->name);
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
}
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
+ bond_compute_features(bond);
+ if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+ (old_features & NETIF_F_VLAN_CHALLENGED))
+ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
+
/* must do this from outside any spinlocks */
bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -2219,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
bond_alb_deinit_slave(bond, slave);
}
- bond_compute_features(bond);
-
bond_destroy_slave_symlinks(bond_dev, slave_dev);
bond_del_vlans_from_slave(bond, slave_dev);
@@ -2269,9 +2247,7 @@ static int bond_release_all(struct net_device *bond_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2282,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
out:
write_unlock_bh(&bond->lock);
+
+ bond_compute_features(bond);
+
return 0;
}
@@ -4337,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
- .get_ufo = ethtool_op_get_ufo,
- .get_flags = ethtool_op_get_flags,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -4367,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
#endif
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
+ .ndo_fix_features = bond_fix_features,
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4422,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
* when there are slaves that are not hw accel
* capable
*/
- bond_dev->features |= (NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER);
- /* By default, we enable GRO on bonding devices.
- * Actual support requires lowlevel drivers are GRO ready.
- */
- bond_dev->features |= NETIF_F_GRO;
+ bond_dev->hw_features = BOND_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->features |= bond_dev->hw_features;
}
static void bond_work_cancel_all(struct bonding *bond)
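
The bonding hunks above move the driver from open-coded feature bookkeeping (bond_compute_features() writing bond_dev->features directly, helped by bond_has_challenged_slaves()) to the hw_features/ndo_fix_features model: bond_setup() advertises the togglable bits in hw_features, bond_fix_features() masks the offered feature word against the slaves under bond->lock, and bond_compute_features() is reduced to recomputing vlan_features and hard_header_len before handing control back to the core via netdev_change_features(). As a rough illustration of that contract (a minimal sketch, not taken from this commit; "foo_priv", its lower_dev field and the ops table are hypothetical), a stacked driver hooks in like this:

#include <linux/netdevice.h>

struct foo_priv {
        struct net_device *lower_dev;   /* hypothetical backing device */
};

/*
 * ndo_fix_features only filters the feature word it is offered; the core
 * applies the result when netdev_update_features() or
 * netdev_change_features() is called on the device.
 */
static u32 foo_fix_features(struct net_device *dev, u32 features)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* e.g. refuse checksum offloads while no lower device is attached */
        if (!priv->lower_dev)
                features &= ~NETIF_F_ALL_CSUM;

        return features;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_fix_features = foo_fix_features,
};

Bits a user may toggle still have to be listed in dev->hw_features at setup time, which is exactly what the new bond_setup() code does before copying hw_features into features.
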
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ec19530..d5a1f9e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3373,8 +3373,8 @@ relink:
tg3_phy_copper_begin(tp);
tg3_readphy(tp, MII_BMSR, &bmsr);
- if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
- (bmsr & BMSR_LSTATUS))
+ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
current_link_up = 1;
}
@@ -6309,6 +6309,42 @@ dma_error:
return NETDEV_TX_OK;
}
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+ return;
+
+ /*
+ * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+ * loopback mode if Half-Duplex mode was negotiated earlier.
+ */
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+ /* Enable internal MAC loopback mode */
+ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+ spin_lock_bh(&tp->lock);
+ tw32(MAC_MODE, tp->mac_mode);
+ netif_carrier_on(tp->dev);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+ } else {
+ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ return;
+
+ /* Disable internal MAC loopback mode */
+ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+ spin_lock_bh(&tp->lock);
+ tw32(MAC_MODE, tp->mac_mode);
+ /* Force link status check */
+ tg3_setup_phy(tp, 1);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+ }
+}
+
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6319,6 +6355,16 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+ tg3_set_loopback(dev, features);
+
+ return 0;
+}
+
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
@@ -9485,6 +9531,13 @@ static int tg3_open(struct net_device *dev)
netif_tx_start_all_queues(dev);
+ /*
+ * Reset loopback feature if it was turned on while the device was down
+ * make sure that it's installed properly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(dev, dev->features);
+
return 0;
err_out3:
@@ -15033,6 +15086,7 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
.ndo_fix_features = tg3_fix_features,
+ .ndo_set_features = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -15049,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
+ .ndo_set_features = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -15246,6 +15301,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
dev->features |= hw_features;
dev->vlan_features |= hw_features;
+ /*
+ * Add loopback capability only for a subset of devices that support
+ * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
+ * loopback for the remaining devices.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT))
+ /* Add the loopback capability */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
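
The tg3 hunks plug the new internal MAC loopback support into the same framework: NETIF_F_LOOPBACK is advertised in dev->hw_features only on chips where MAC loopback is expected to work, tg3_set_features() reacts when that bit changes while the interface is running, and tg3_open() re-applies the mode in case it was toggled while the device was down. The division of labour is the notable part; as a hedged sketch (hypothetical "foo_" names, not tg3 code), an ndo_set_features handler typically acts only on the bits that actually changed:

#include <linux/netdevice.h>

/* Hypothetical hardware hook; a real driver would program its MAC here. */
static void foo_apply_loopback(struct net_device *dev, bool enable)
{
        netdev_info(dev, "internal loopback %s\n", enable ? "on" : "off");
}

/*
 * By the time ndo_set_features runs, the core has already passed the
 * requested word through ndo_fix_features; the driver only needs to act
 * on the difference from the current dev->features.
 */
static int foo_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
                foo_apply_loopback(dev, !!(features & NETIF_F_LOOPBACK));

        return 0;
}

Once NETIF_F_LOOPBACK sits in hw_features, the mode becomes user-togglable; assuming the generic feature strings and a new enough ethtool, that is roughly "ethtool -K eth0 loopback on".
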
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1033ef6..4ab557d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,13 +54,13 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "23-Apr-2011"
+#define DRIVER_VERSION "06-May-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */
+#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
} else {
/* reset variables */
- skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+ skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* store last offset */
last_offset = offset;
- if ((last_offset < ctx->tx_max) && ((last_offset %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+ if (((last_offset < ctx->tx_max) && ((last_offset %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
+ (((last_offset == ctx->tx_max) && ((ctx->tx_max %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
+ (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
/* force short packet */
*(((u8 *)skb_out->data) + last_offset) = 0;
last_offset++;
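
The cdc_ncm change is about USB bulk-transfer termination: a transfer whose length is an exact multiple of the endpoint's wMaxPacketSize is not complete until a short (or zero-length) packet follows. Instead of reserving the old "+ 4" slack in CDC_NCM_NTB_MAX_SIZE_TX, the driver now allocates one spare byte (tx_max + 1) and appends a single zero byte whenever a finished NTB would otherwise end exactly on a packet boundary while still being below the device's dwNtbOutMaxSize. The extended condition in the hunk above can be read as the following helper (an illustrative restatement, not code from the driver; the name is made up and it assumes the driver's struct cdc_ncm_ctx):

static bool ncm_needs_short_packet_pad(const struct cdc_ncm_ctx *ctx,
                                       u32 last_offset)
{
        u16 maxpkt = le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize);

        /* NTB shorter than tx_max but ending exactly on a packet boundary */
        if (last_offset < ctx->tx_max)
                return (last_offset % maxpkt) == 0;

        /*
         * NTB filled to tx_max: pad only when tx_max itself is a multiple
         * of wMaxPacketSize and the device accepts at least one more byte.
         */
        return last_offset == ctx->tx_max &&
               (ctx->tx_max % maxpkt) == 0 &&
               ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
}
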
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c0da230..fa6e2ac 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2884,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
int num_tx_queues;
int num_rx_queues;
+ if (!pci_msi_enabled())
+ enable_mq = 0;
+
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 8ba7b5f..f50d36f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */