Diffstat (limited to 'drivers/net')
57 files changed, 874 insertions, 460 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b721902..a7d4735 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; @@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond) dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; bond_dev->priv_flags = flags | dst_release_flag; @@ -1519,7 +1526,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* no need to lock since we're protected by rtnl_lock */ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); - if (bond_vlan_used(bond)) { + if (vlan_uses_dev(bond_dev)) { pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", bond_dev->name, slave_dev->name, bond_dev->name); return -EPERM; @@ -3452,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) /*-------------------------- Device entry points ----------------------------*/ +static void bond_work_init_all(struct bonding *bond) +{ + INIT_DELAYED_WORK(&bond->mcast_work, + bond_resend_igmp_join_requests_delayed); + INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); + INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) + INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); + else + INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); + INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); +} + +static void bond_work_cancel_all(struct bonding *bond) +{ + cancel_delayed_work_sync(&bond->mii_work); + cancel_delayed_work_sync(&bond->arp_work); + cancel_delayed_work_sync(&bond->alb_work); + cancel_delayed_work_sync(&bond->ad_work); + cancel_delayed_work_sync(&bond->mcast_work); +} + static int bond_open(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -3474,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev) } read_unlock(&bond->lock); - INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); + bond_work_init_all(bond); if (bond_is_lb(bond)) { /* bond_alb_initialize must be called before the timer * is started. */ - if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { - /* something went wrong - fail the open operation */ + if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) return -ENOMEM; - } - - INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); queue_delayed_work(bond->wq, &bond->alb_work, 0); } - if (bond->params.miimon) { /* link check interval, in milliseconds. 
*/ - INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); + if (bond->params.miimon) /* link check interval, in milliseconds. */ queue_delayed_work(bond->wq, &bond->mii_work, 0); - } if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) - INIT_DELAYED_WORK(&bond->arp_work, - bond_activebackup_arp_mon); - else - INIT_DELAYED_WORK(&bond->arp_work, - bond_loadbalance_arp_mon); - queue_delayed_work(bond->wq, &bond->arp_work, 0); if (bond->params.arp_validate) bond->recv_probe = bond_arp_rcv; } if (bond->params.mode == BOND_MODE_8023AD) { - INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); queue_delayed_work(bond->wq, &bond->ad_work, 0); /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; @@ -3523,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); write_lock_bh(&bond->lock); - bond->send_peer_notif = 0; - write_unlock_bh(&bond->lock); - if (bond->params.miimon) { /* link check interval, in milliseconds. */ - cancel_delayed_work_sync(&bond->mii_work); - } - - if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ - cancel_delayed_work_sync(&bond->arp_work); - } - - switch (bond->params.mode) { - case BOND_MODE_8023AD: - cancel_delayed_work_sync(&bond->ad_work); - break; - case BOND_MODE_TLB: - case BOND_MODE_ALB: - cancel_delayed_work_sync(&bond->alb_work); - break; - default: - break; - } - - if (delayed_work_pending(&bond->mcast_work)) - cancel_delayed_work_sync(&bond->mcast_work); - + bond_work_cancel_all(bond); if (bond_is_lb(bond)) { /* Must be called only after all * slaves have been released @@ -4429,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->features |= bond_dev->hw_features; } -static void bond_work_cancel_all(struct bonding *bond) -{ - if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) - cancel_delayed_work_sync(&bond->mii_work); - - if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) - cancel_delayed_work_sync(&bond->arp_work); - - if (bond->params.mode == BOND_MODE_ALB && - delayed_work_pending(&bond->alb_work)) - cancel_delayed_work_sync(&bond->alb_work); - - if (bond->params.mode == BOND_MODE_8023AD && - delayed_work_pending(&bond->ad_work)) - cancel_delayed_work_sync(&bond->ad_work); - - if (delayed_work_pending(&bond->mcast_work)) - cancel_delayed_work_sync(&bond->mcast_work); -} - /* * Destroy a bonding device. * Must be under rtnl_lock when this function is called. 
@@ -4699,12 +4670,13 @@ static int bond_check_params(struct bond_params *params) arp_ip_count++) { /* not complete check, but should be good enough to catch mistakes */ - if (!isdigit(arp_ip_target[arp_ip_count][0])) { + __be32 ip = in_aton(arp_ip_target[arp_ip_count]); + if (!isdigit(arp_ip_target[arp_ip_count][0]) || + ip == 0 || ip == htonl(INADDR_BROADCAST)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[arp_ip_count]); arp_interval = 0; } else { - __be32 ip = in_aton(arp_ip_target[arp_ip_count]); arp_target[arp_ip_count] = ip; } } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index dc15d24..1877ed7 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no arp_interval value specified.\n", bond->dev->name); @@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", bond->dev->name, bond->dev->name); bond->params.miimon = 0; - if (delayed_work_pending(&bond->mii_work)) { - cancel_delayed_work(&bond->mii_work); - flush_workqueue(bond->wq); - } } if (!bond->params.arp_targets[0]) { pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", @@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d, * timer will get fired off when the open function * is called. */ - if (!delayed_work_pending(&bond->arp_work)) { - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) - INIT_DELAYED_WORK(&bond->arp_work, - bond_activebackup_arp_mon); - else - INIT_DELAYED_WORK(&bond->arp_work, - bond_loadbalance_arp_mon); - - queue_delayed_work(bond->wq, &bond->arp_work, 0); - } + cancel_delayed_work_sync(&bond->mii_work); + queue_delayed_work(bond->wq, &bond->arp_work, 0); } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR, @@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no miimon value specified.\n", bond->dev->name); @@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d, bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; } - if (delayed_work_pending(&bond->arp_work)) { - cancel_delayed_work(&bond->arp_work); - flush_workqueue(bond->wq); - } } if (bond->dev->flags & IFF_UP) { @@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d, * timer will get fired off when the open function * is called. 
*/ - if (!delayed_work_pending(&bond->mii_work)) { - INIT_DELAYED_WORK(&bond->mii_work, - bond_mii_monitor); - queue_delayed_work(bond->wq, - &bond->mii_work, 0); - } + cancel_delayed_work_sync(&bond->arp_work); + queue_delayed_work(bond->wq, &bond->mii_work, 0); } } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, @@ -1060,7 +1046,7 @@ static ssize_t bonding_store_primary(struct device *d, goto out; } - sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ + sscanf(buf, "%15s", ifname); /* IFNAMSIZ */ /* check to see if we are clearing primary */ if (!strlen(ifname) || buf[0] == '\n') { @@ -1237,7 +1223,7 @@ static ssize_t bonding_store_active_slave(struct device *d, goto out; } - sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ + sscanf(buf, "%15s", ifname); /* IFNAMSIZ */ /* check to see if we are clearing active */ if (!strlen(ifname) || buf[0] == '\n') { @@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, goto out; } + read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) @@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, slave->inactive = 1; } } + read_unlock(&bond->lock); out: return ret; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index c78ecfc..a412bf6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -144,9 +144,22 @@ #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) -/* FLEXCAN hardware feature flags */ +/* + * FLEXCAN hardware feature flags + * + * Below is some version info we got: + * SOC Version IP-Version Glitch- [TR]WRN_INT + * Filter? connected? + * MX25 FlexCAN2 03.00.00.00 no no + * MX28 FlexCAN2 03.00.04.00 yes yes + * MX35 FlexCAN2 03.00.00.00 no no + * MX53 FlexCAN2 03.00.00.00 yes no + * MX6s FlexCAN3 10.00.12.00 yes yes + * + * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
+ */ #define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */ -#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */ +#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */ /* Structure of the message buffer */ struct flexcan_mb { @@ -205,7 +218,7 @@ static struct flexcan_devtype_data fsl_p1010_devtype_data = { }; static struct flexcan_devtype_data fsl_imx28_devtype_data; static struct flexcan_devtype_data fsl_imx6q_devtype_data = { - .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE, + .features = FLEXCAN_HAS_V10_FEATURES, }; static const struct can_bittiming_const flexcan_bittiming_const = { diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index f5b82ae..6525dbc 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -30,9 +30,10 @@ #include "sja1000.h" -MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); +MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" @@ -64,7 +65,11 @@ struct peak_pci_chan { #define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */ #define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */ #define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */ -#define PEAK_MPCI_DEVICE_ID 0x0008 /* The miniPCI slot cards */ +#define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */ +#define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */ +#define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ +#define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ +#define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ #define PEAK_PCI_CHAN_MAX 4 @@ -76,6 +81,10 @@ static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = { {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #endif diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 86f26a1..25723d8 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, mc->pdev->dev.can.state = new_state; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts->hwtstamp = timeval_to_ktime(tv); } netif_rx(skb); @@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) struct sk_buff *skb; struct can_frame *cf; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) @@ -652,7 +655,8 @@ static int 
pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) /* convert timestamp into kernel time */ peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); /* push the skb */ netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index e1626d9..30d79bf 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct can_frame *can_frame; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(netdev, &can_frame); if (!skb) @@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, memcpy(can_frame->data, rx->data, can_frame->can_dlc); peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; @@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, u8 err_mask = 0; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) @@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, dev->can.state = new_state; peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index d04911d..47618e5 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2422099..4833b6a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_shinfo(skb)->nr_frags + BDS_PER_TX_PKT + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; - netif_tx_stop_queue(txq); - BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + /* Handle special storage cases separately */ + if (txdata->tx_ring_size != 0) { + BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + netif_tx_stop_queue(txq); + } + return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c65295d..6e5bdd1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) SHMEM_EEE_ADV_STATUS_SHIFT); if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { DP(BNX2X_MSG_ETHTOOL, - "Direct manipulation of EEE advertisment is not supported\n"); + "Direct manipulation of EEE advertisement is not supported\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index e2e45ee..f6cfdc6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -137,7 +137,16 @@ #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD - +#define LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) #define SFP_EEPROM_CON_TYPE_ADDR 0x2 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 @@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) DEFAULT_PHY_DEV_ADDR); } +static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Set correct devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, + phy->def_md_devad); + break; + } +} + static void bnx2x_xgxs_deassert(struct link_params *params) { struct bnx2x *bp = params->bp; @@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params) REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); + bnx2x_xgxs_specific_func(¶ms->phy[INT_PHY], params, + PHY_INIT); } static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, @@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 val16 = 0, lane, i; + u16 lane, i, cl72_ctrl, an_adv = 0; + u16 ucode_ver; struct bnx2x *bp = params->bp; static struct bnx2x_reg_set reg_set[] = { {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555}, {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, @@ -3565,12 
+3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0xf8ff; + cl72_ctrl |= 0x3800; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); + /* Check adding advertisement for 1G KX */ if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; - val16 |= (1<<5); + an_adv |= (1<<5); /* Enable CL37 1G Parallel Detect */ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); @@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || (vars->line_speed == SPEED_10000)) { /* Check adding advertisement for 10G KR */ - val16 |= (1<<7); + an_adv |= (1<<7); /* Enable 10G Parallel Detect */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 1); - + bnx2x_set_aer_mmd(params, phy); DP(NETIF_MSG_LINK, "Advertize 10G\n"); } @@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertised speeds */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); /* Advertised and set FEC (Forward Error Correction) */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); - if (val16 < 0xd108) { - DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); + MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); + if (ucode_ver < 0xd108) { + DP(NETIF_MSG_LINK, "Enable AN KR work-around. 
WC ver:0x%x\n", + ucode_ver); vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; } bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, @@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 i; + u16 val16, i, lane; static struct bnx2x_reg_set reg_set[] = { /* Disable Autoneg */ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00}, {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, - /* Disable CL36 PCS Tx */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0}, - /* Double Wide Single Data Rate @ pll rate */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF}, /* Leave cl72 training enable, needed for KR */ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, @@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - /* Leave CL72 enabled */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - 0x3800); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Global registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); /* Set speed via PMA/PMD register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); @@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u16 val16; + u16 val16, lane; bnx2x_sfp_e3_set_transmitter(params, phy, 0); bnx2x_set_mdio_clk(bp, params->chip_id, params->port); bnx2x_set_aer_mmd(params, phy); @@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, MDIO_WC_REG_XGXSBLK1_LANECTRL2, val16 & 0xff00); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); + } static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, @@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params, vars->mac_type = MAC_TYPE_NONE; /* Update shared memory */ - 
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | - LINK_STATUS_LINK_UP | - LINK_STATUS_PHYSICAL_LINK_FLAG | - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | - LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | - LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | - LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE); + vars->link_status &= ~LINK_UPDATE_MASK; vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status); @@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; u8 active_external_phy = INT_PHY; vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_UPDATE_MASK; for (phy_index = INT_PHY; phy_index < params->num_phys; phy_index++) { phy_vars[phy_index].flow_ctrl = 0; @@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params, static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, u16 addr, u8 byte_cnt, - u8 *o_buf) + u8 *o_buf, u8 is_init) { int rc = 0; u8 i, j = 0, cnt = 0; @@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* 4 byte aligned address */ addr32 = addr & (~0x3); do { - if (cnt == I2C_WA_PWR_ITER) { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { bnx2x_warpcore_power_module(params, phy, 0); /* Note that 100us are not enough here */ - usleep_range(1000,1000); + usleep_range(1000, 2000); bnx2x_warpcore_power_module(params, phy, 1); } rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, @@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf, 0); break; } return rc; @@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, { u8 val; + int rc; struct bnx2x *bp = params->bp; u16 timeout; /* Initialization time after hot-plug may take up to 300ms for @@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, */ for (timeout = 0; timeout < 60; timeout++) { - if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) - == 0) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, + params, 1, + 1, &val, 1); + else + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, + &val); + if (rc == 0) { DP(NETIF_MSG_LINK, "SFP+ module initialization took %d ms\n", timeout * 5); @@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, } usleep_range(5000, 10000); } - return -EINVAL; + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val); + return rc; } static void bnx2x_8727_power_module(struct bnx2x *bp, @@ -9878,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, else rc = bnx2x_8483x_disable_eee(phy, params, vars); if (rc) { - DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); return rc; } } else { @@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func }; static struct bnx2x_phy phy_warpcore = { .type = 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, @@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_BASE_T; break; case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); phy->media_type = ETH_PHY_XFP_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_SFI: @@ -12919,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); break; default: - DP(NETIF_MSG_LINK, "Analyze UNKOWN\n"); + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); } DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d5648fc..01611b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6794,8 +6794,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); if (IS_MF(bp)) low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); @@ -9544,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) */ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. 
Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } } } @@ -11902,7 +11906,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, /* disable FCOE L2 queue for E1x */ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; - + /* disable FCOE for 57840 device, until FW supports it */ + switch (ent->driver_data) { + case BCM57840_O: + case BCM57840_4_10: + case BCM57840_2_20: + case BCM57840_MFO: + case BCM57840_MF: + bp->flags |= NO_FCOE_FLAG; + } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 71971a1..614981c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp, /* Check if this request is ok */ rc = o->validate(bp, o->owner, elem); if (rc) { - BNX2X_ERR("Preamble failed: %d\n", rc); + DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc); goto free_and_exit; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a4da893..378988b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -251,6 +251,8 @@ struct adapter_params { unsigned char rev; /* chip revision */ unsigned char offload; + unsigned char bypass; + unsigned int ofldq_wr_cred; }; @@ -642,6 +644,23 @@ extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) +static inline int is_bypass(struct adapter *adap) +{ + return adap->params.bypass; +} + +static inline int is_bypass_device(int device) +{ + /* this should be set based upon device capabilities */ + switch (device) { + case 0x440b: + case 0x440c: + return 1; + default: + return 0; + } +} + static inline unsigned int core_ticks_per_usec(const struct adapter *adap) { return adap->params.vpd.cclk / 1000; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 604f4f8..0df1284 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3416,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) finicsum, cfcsum); /* - * If we're a pure NIC driver then disable all offloading facilities. - * This will allow the firmware to optimize aspects of the hardware - * configuration which will result in improved performance. - */ - caps_cmd.ofldcaps = 0; - caps_cmd.iscsicaps = 0; - caps_cmd.rdmacaps = 0; - caps_cmd.fcoecaps = 0; - - /* * And now tell the firmware to use the configuration we just loaded. */ caps_cmd.op_to_write = @@ -3513,18 +3503,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; -#ifndef CONFIG_CHELSIO_T4_OFFLOAD - /* - * If we're a pure NIC driver then disable all offloading facilities. - * This will allow the firmware to optimize aspects of the hardware - * configuration which will result in improved performance. 
- */ - caps_cmd.ofldcaps = 0; - caps_cmd.iscsicaps = 0; - caps_cmd.rdmacaps = 0; - caps_cmd.fcoecaps = 0; -#endif - if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { if (!vf_acls) caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); @@ -3745,6 +3723,7 @@ static int adap_init0(struct adapter *adap) u32 v, port_vec; enum dev_state state; u32 params[7], val[7]; + struct fw_caps_config_cmd caps_cmd; int reset = 1, j; /* @@ -3898,6 +3877,9 @@ static int adap_init0(struct adapter *adap) goto bye; } + if (is_bypass_device(adap->pdev->device)) + adap->params.bypass = 1; + /* * Grab some of our basic fundamental operating parameters. */ @@ -3940,13 +3922,12 @@ static int adap_init0(struct adapter *adap) adap->tids.aftid_end = val[1]; } -#ifdef CONFIG_CHELSIO_T4_OFFLOAD /* * Get device capabilities so we can determine what resources we need * to manage. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ); caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), @@ -3991,15 +3972,6 @@ static int adap_init0(struct adapter *adap) adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; - params[0] = FW_PARAM_PFVF(ETHOFLD_START); - params[1] = FW_PARAM_PFVF(ETHOFLD_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, - params, val); - if ((val[0] != val[1]) && (ret >= 0)) { - adap->tids.uotid_base = val[0]; - adap->tids.nuotids = val[1] - val[0] + 1; - } - adap->params.offload = 1; } if (caps_cmd.rdmacaps) { @@ -4048,7 +4020,6 @@ static int adap_init0(struct adapter *adap) } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV -#endif /* CONFIG_CHELSIO_T4_OFFLOAD */ /* * These are finalized by FW initialization, load their values now. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 1b899fe..39bec73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -102,6 +102,9 @@ struct tid_info { unsigned int ftid_base; unsigned int aftid_base; unsigned int aftid_end; + /* Server filter region */ + unsigned int sftid_base; + unsigned int nsftids; spinlock_t atid_lock ____cacheline_aligned_in_smp; union aopen_entry *afree; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 32eec15..730ae2c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF7(sge_hps)); t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY(INGPADBOUNDARY_MASK) | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE_MASK, INGPADBOUNDARY(fl_align_log - 5) | EGRSTATUSPAGESIZE(stat_len != 64)); @@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, { struct fw_vi_enable_cmd c; + memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 1d03dcd..19ac096 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; - if (!netif_running(ndev)) + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } gfar_init_bds(ndev); init_registers(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index b9db0e0..2e5daee 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) pr_err("no resource\n"); goto no_resource; } - if (request_resource(&ioport_resource, etsects->rsrc)) { + if (request_resource(&iomem_resource, etsects->rsrc)) { pr_err("resource busy\n"); goto no_resource; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 56b20d1..116f0e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2673,6 +2673,9 @@ static int ixgbe_get_ts_info(struct net_device *dev, case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + 
SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f8064df..60ac46f 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev) jme_clear_pm(jme); JME_NAPI_ENABLE(jme); - tasklet_enable(&jme->linkch_task); - tasklet_enable(&jme->txclean_task); - tasklet_hi_enable(&jme->rxclean_task); - tasklet_hi_enable(&jme->rxempty_task); + tasklet_init(&jme->linkch_task, jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, + (unsigned long) jme); rc = jme_request_irq(jme); if (rc) @@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev) JME_NAPI_DISABLE(jme); - tasklet_disable(&jme->linkch_task); - tasklet_disable(&jme->txclean_task); - tasklet_disable(&jme->rxclean_task); - tasklet_disable(&jme->rxempty_task); + tasklet_kill(&jme->linkch_task); + tasklet_kill(&jme->txclean_task); + tasklet_kill(&jme->rxclean_task); + tasklet_kill(&jme->rxempty_task); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); @@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev, tasklet_init(&jme->pcc_task, jme_pcc_tasklet, (unsigned long) jme); - tasklet_init(&jme->linkch_task, - jme_link_change_tasklet, - (unsigned long) jme); - tasklet_init(&jme->txclean_task, - jme_tx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxclean_task, - jme_rx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxempty_task, - jme_rx_empty_tasklet, - (unsigned long) jme); - tasklet_disable_nosync(&jme->linkch_task); - tasklet_disable_nosync(&jme->txclean_task); - tasklet_disable_nosync(&jme->rxclean_task); - tasklet_disable_nosync(&jme->rxempty_task); jme->dpi.cur = PCC_P1; jme->reg_ghc = 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9b9c2ac..d19a143 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) dev0 = hw->dev[0]; unregister_netdev(dev0); - tasklet_disable(&hw->phy_task); + tasklet_kill(&hw->phy_task); spin_lock_irq(&hw->hw_lock); hw->intr_mask = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 5d367958..b799ab12 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -237,7 +237,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, if (err) return err; - memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate)); + memcpy(priv->maxrate, tmp, sizeof(priv->maxrate)); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c10e3a6..b35094c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_bf_free(mdev->dev, &ring->bf); mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); - mlx4_qp_release_range(mdev->dev, ring->qpn, 1); mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 
kfree(ring->bounce_buf); @@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (bounce) tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 51c7649..b84a88b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, ctx = &priv->mfunc.master.slave_state[slave]; spin_lock_irqsave(&ctx->lock, flags); - mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n", - __func__, slave, cur_state, event); - switch (cur_state) { case SLAVE_PORT_DOWN: if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) @@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, goto out; } ret = mlx4_get_slave_port_state(dev, slave, port); - mlx4_dbg(dev, "%s: slave: %d, current state: %d new event" - " :%d gen_event: %d\n", - __func__, slave, cur_state, event, *gen_event); out: spin_unlock_irqrestore(&ctx->lock, flags); @@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); } +static void mlx4_unmap_uar(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + if (priv->eq_table.uar_map[i]) { + iounmap(priv->eq_table.uar_map[i]); + priv->eq_table.uar_map[i] = NULL; + } +} + static int mlx4_create_eq(struct mlx4_dev *dev, int nent, u8 intr, struct mlx4_eq *eq) { @@ -1207,6 +1213,7 @@ err_out_unmap: mlx4_free_irqs(dev); err_out_bitmap: + mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); err_out_free: @@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); - for (i = 0; i < mlx4_num_eq_uar(dev); ++i) - if (priv->eq_table.uar_map[i]) - iounmap(priv->eq_table.uar_map[i]); - + mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); kfree(priv->eq_table.uar_map); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 80df2ab..2aa80af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1405,7 +1405,10 @@ unmap_bf: unmap_bf_area(dev); err_close: - mlx4_close_hca(dev); + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else + mlx4_CLOSE_HCA(dev, 0); err_free_icm: if (!mlx4_is_slave(dev)) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 926c911..b05705f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave, new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; *(u8 *)(inbox->buf + 35) = new_index; - - mlx4_dbg(dev, "port = %d, orig pkey index = %d, " - "new pkey index = %d\n", port, orig_index, new_index); } static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, @@ -351,9 +348,6 @@ 
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) qp_ctx->alt_path.mgid_index = slave & 0x7F; } - - mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", - slave, qp_ctx->pri_path.mgid_index); } static int mpt_mask(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 318fee9..69e0197 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev) /* Delay for receive task to stop scheduling itself. */ msleep(2000 / HZ); - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); + tasklet_kill(&hw_priv->rx_tasklet); + tasklet_kill(&hw_priv->tx_tasklet); free_irq(dev->irq, hw_priv->dev); transmit_cleanup(hw_priv, 0); @@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev) rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); if (rc) return rc; - tasklet_enable(&hw_priv->rx_tasklet); - tasklet_enable(&hw_priv->tx_tasklet); + tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, + (unsigned long) hw_priv); + tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, + (unsigned long) hw_priv); hw->promiscuous = 0; hw->all_multi = 0; @@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev, spin_lock_init(&hw_priv->hwlock); mutex_init(&hw_priv->lock); - /* tasklet is enabled. */ - tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, - (unsigned long) hw_priv); - tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, - (unsigned long) hw_priv); - - /* tasklet_enable will decrement the atomic counter. */ - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); - for (i = 0; i < TOTAL_PORT_NUM; i++) init_waitqueue_head(&hw_priv->counter[i].counter); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 53743f7..af8b414 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) pldat->dma_buff_base_p); free_irq(ndev->irq, ndev); iounmap(pldat->net_base); + mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); clk_disable(pldat->clk); clk_put(pldat->clk); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index b2a94d0..4c4fe5b 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) } /** - * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context - * @reg: Pointer of register - * @busy: Busy bit - */ -static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) -{ - u32 tmp; - int ret = -1; - /* wait busy */ - tmp = 20; - while ((ioread32(reg) & bit) && --tmp) - udelay(5); - if (!tmp) - pr_err("Error: busy bit is not cleared\n"); - else - ret = 0; - return ret; -} - -/** * pch_gbe_mac_mar_set - Set MAC address register * @hw: Pointer to the HW structure * @addr: Pointer to the MAC address @@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) return; } -static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) +static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw) { - /* Read the MAC addresses. 
and store to the private data */ - pch_gbe_mac_read_mac_addr(hw); - iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); - pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); - /* Setup the MAC addresses */ - pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); - return; + u32 rctl; + /* Disables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); +} + +static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw) +{ + u32 rctl; + /* Enables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); } /** @@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) { struct pch_gbe_hw *hw = &adapter->hw; - u32 rdba, rdlen, rctl, rxdma; + u32 rdba, rdlen, rxdma; pr_debug("dma adr = 0x%08llx size = 0x%08x\n", (unsigned long long)adapter->rx_ring->dma, @@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) pch_gbe_mac_force_mac_fc(hw); - /* Disables Receive MAC */ - rctl = ioread32(&hw->reg->MAC_RX_EN); - iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); + pch_gbe_disable_mac_rx(hw); /* Disables Receive DMA */ rxdma = ioread32(&hw->reg->DMA_CTRL); @@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) spin_unlock_irqrestore(&adapter->stats_lock, flags); } -static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) +static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw) { - struct pch_gbe_hw *hw = &adapter->hw; u32 rxdma; - u16 value; - int ret; /* Disable Receive DMA */ rxdma = ioread32(&hw->reg->DMA_CTRL); rxdma &= ~PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Wait Rx DMA BUS is IDLE */ - ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); - if (ret) { - /* Disable Bus master */ - pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); - value &= ~PCI_COMMAND_MASTER; - pci_write_config_word(adapter->pdev, PCI_COMMAND, value); - /* Stop Receive */ - pch_gbe_mac_reset_rx(hw); - /* Enable Bus master */ - value |= PCI_COMMAND_MASTER; - pci_write_config_word(adapter->pdev, PCI_COMMAND, value); - } else { - /* Stop Receive */ - pch_gbe_mac_reset_rx(hw); - } - /* reprogram multicast address register after reset */ - pch_gbe_set_multi(adapter->netdev); } -static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw) { u32 rxdma; @@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw) rxdma = ioread32(&hw->reg->DMA_CTRL); rxdma |= PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Enables Receive */ - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); - return; } /** @@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), &hw->reg->INT_EN); - pch_gbe_stop_receive(adapter); + pch_gbe_disable_dma_rx(&adapter->hw); int_st |= ioread32(&hw->reg->INT_ST); int_st = int_st & ioread32(&hw->reg->INT_EN); } @@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; - int err; + int err = -EINVAL; /* Ensure we have a valid MAC */ if (!is_valid_ether_addr(adapter->hw.mac.addr)) { pr_err("Error: Invalid MAC address\n"); - return -EINVAL; + goto 
out; } /* hardware has been reset, we need to reload some things */ @@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) err = pch_gbe_request_irq(adapter); if (err) { - pr_err("Error: can't bring device up\n"); - return err; + pr_err("Error: can't bring device up - irq request failed\n"); + goto out; } err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); if (err) { - pr_err("Error: can't bring device up\n"); - return err; + pr_err("Error: can't bring device up - alloc rx buffers pool failed\n"); + goto freeirq; } pch_gbe_alloc_tx_buffers(adapter, tx_ring); pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); adapter->tx_queue_len = netdev->tx_queue_len; - pch_gbe_start_receive(&adapter->hw); + pch_gbe_enable_dma_rx(&adapter->hw); + pch_gbe_enable_mac_rx(&adapter->hw); mod_timer(&adapter->watchdog_timer, jiffies); @@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) netif_start_queue(adapter->netdev); return 0; + +freeirq: + pch_gbe_free_irq(adapter); +out: + return err; } /** @@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) int work_done = 0; bool poll_end_flag = false; bool cleaned = false; - u32 int_en; pr_debug("budget : %d\n", budget); @@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) if (poll_end_flag) { napi_complete(napi); - if (adapter->rx_stop_flag) { - adapter->rx_stop_flag = false; - pch_gbe_start_receive(&adapter->hw); - } pch_gbe_irq_enable(adapter); - } else - if (adapter->rx_stop_flag) { - adapter->rx_stop_flag = false; - pch_gbe_start_receive(&adapter->hw); - int_en = ioread32(&adapter->hw.reg->INT_EN); - iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), - &adapter->hw.reg->INT_EN); - } + } + + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + pch_gbe_enable_dma_rx(&adapter->hw); + } pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", poll_end_flag, work_done, budget); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index df09b1c..6407d0d 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) qdev->req_q_size = (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + /* The barrier is required to ensure request and response queue + * addr writes to the registers. 
+ */ + wmb(); + qdev->req_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->req_q_size, @@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) return -ENOMEM; } - qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); - qdev->rsp_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->rsp_q_size, diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 1c81825..b01f83a 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + cpw16(MultiIntr, 0); cpw8_f(Cfg9346, Cfg9346_Lock); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e7ff886..927aa33 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32: @@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + RTL_W32(MAR0 + 4, mc_filter[1]); RTL_W32(MAR0 + 0, mc_filter[0]); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fb9f6b3..edf5edb 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev) netif_start_queue(net_dev); /* Workaround for EDB */ - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. 
*/ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 62d1baf..c53c0f4 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev) static int __devinit smsc911x_init(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int byte_test; + unsigned int byte_test, mask; unsigned int to = 100; SMSC_TRACE(pdata, probe, "Driver Parameters:"); @@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev) /* * poll the READY bit in PMT_CTRL. Any other access to the device is * forbidden while this bit isn't set. Try for 100ms + * + * Note that this test is done before the WORD_SWAP register is + * programmed. So in some configurations the READY bit is at 16 before + * WORD_SWAP is written to. This issue is worked around by waiting + * until either bit 0 or bit 16 gets set in PMT_CTRL. + * + * SMSC has confirmed that checking bit 16 (marked as reserved in + * the datasheet) is fine since these bits "will either never be set + * or can only go high after READY does (so also indicate the device + * is ready)". */ - while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + + mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_); + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to) udelay(1000); + if (to == 0) { pr_err("Device not READY in 100ms aborting\n"); return -ENODEV; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index b26cbda..2c41894 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3)) + depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX)) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 4e2a162..66e025a 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) ingress_irq = rc; tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, - 0, NULL, NULL); + 0, "tile_net", NULL); if (rc != 0) { netdev_err(dev, "request_irq failed: %d\n", rc); destroy_irq(ingress_irq); @@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb) { struct skb_shared_info *sh = skb_shinfo(skb); unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ long n; /* size of the current piece of payload */ int num_edescs = 0; int segment; @@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb) /* Advance as needed. 
*/ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; + f_size = skb_frag_size(&sh->frags[f_id]); f_used = 0; } @@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, struct iphdr *ih; struct tcphdr *th; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned char *data = skb->data; unsigned int ih_off, th_off, p_len; unsigned int isum_seed, tsum_seed, id, seq; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ long n; /* size of the current piece of payload */ int segment; @@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, isum_seed = ((0xFFFF - ih->check) + (0xFFFF - ih->tot_len) + (0xFFFF - ih->id)); - tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); + tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); id = ntohs(ih->id); seq = ntohl(th->seq); @@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, /* Advance as needed. */ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; + f_size = skb_frag_size(&sh->frags[f_id]); f_used = 0; } @@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, struct tile_net_priv *priv = netdev_priv(dev); struct skb_shared_info *sh = skb_shinfo(skb); unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; gxio_mpipe_edesc_t edesc_head = { { 0 } }; gxio_mpipe_edesc_t edesc_body = { { 0 } }; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ - void *f_data = skb->data; + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + void *f_data = skb->data + sh_len; long n; /* size of the current piece of payload */ unsigned long tx_packets = 0, tx_bytes = 0; unsigned int csum_start; @@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, /* Egress the payload. */ while (p_used < p_len) { + void *va; /* Advance as needed. */ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; - f_used = 0; + f_size = skb_frag_size(&sh->frags[f_id]); f_data = tile_net_frag_buf(&sh->frags[f_id]); + f_used = 0; } + va = f_data + f_used; + /* Use bytes from the current fragment. */ n = p_len - p_used; if (n > f_size - f_used) @@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, p_used += n; /* Egress a piece of the payload. 
*/ - edesc_body.va = va_to_tile_io_addr(f_data) + f_used; + edesc_body.va = va_to_tile_io_addr(va); edesc_body.xfer_size = n; edesc_body.bound = !(p_used < p_len); gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 0793299..a788501 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -894,6 +894,8 @@ out: return IRQ_HANDLED; } +static void axienet_dma_err_handler(unsigned long data); + /** * axienet_open - Driver open routine. * @ndev: Pointer to net_device structure @@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev) phy_start(lp->phy_dev); } + /* Enable tasklets for Axi DMA error handling */ + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, + (unsigned long) lp); + /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); if (ret) @@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev) ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); if (ret) goto err_rx_irq; - /* Enable tasklets for Axi DMA error handling */ - tasklet_enable(&lp->dma_err_tasklet); + return 0; err_rx_irq: @@ -960,6 +965,7 @@ err_tx_irq: if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; + tasklet_kill(&lp->dma_err_tasklet); dev_err(lp->dev, "request_irq() failed\n"); return ret; } @@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev) axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - tasklet_disable(&lp->dma_err_tasklet); + tasklet_kill(&lp->dma_err_tasklet); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); @@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op) goto err_iounmap_2; } - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, - (unsigned long) lp); - tasklet_disable(&lp->dma_err_tasklet); - return 0; err_iounmap_2: diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 98934bd..477d672 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1102,10 +1102,12 @@ static int init_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 5039f08..43e9ab4 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 983bbf4..961f0b2 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -15,6 +15,11 @@ if PHYLIB comment "MII PHY device drivers" +config AT803X_PHY + tristate "Drivers for Atheros AT803X PHYs" + ---help--- + Currently supports the AT8030 and AT8035 model + config AMD_PHY tristate "Drivers for the AMD PHYs" ---help--- diff --git a/drivers/net/phy/Makefile 
b/drivers/net/phy/Makefile index 426674d..9645e38 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o +obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c new file mode 100644 index 0000000..45cbc10 --- /dev/null +++ b/drivers/net/phy/at803x.c @@ -0,0 +1,176 @@ +/* + * drivers/net/phy/at803x.c + * + * Driver for Atheros 803x PHY + * + * Author: Matus Ujhelyi <ujhelyi.m@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/phy.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#define AT803X_INTR_ENABLE 0x12 +#define AT803X_INTR_STATUS 0x13 +#define AT803X_WOL_ENABLE 0x01 +#define AT803X_DEVICE_ADDR 0x03 +#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C +#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B +#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A +#define AT803X_MMD_ACCESS_CONTROL 0x0D +#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E +#define AT803X_FUNC_DATA 0x4003 + +MODULE_DESCRIPTION("Atheros 803x PHY driver"); +MODULE_AUTHOR("Matus Ujhelyi"); +MODULE_LICENSE("GPL"); + +static void at803x_set_wol_mac_addr(struct phy_device *phydev) +{ + struct net_device *ndev = phydev->attached_dev; + const u8 *mac; + unsigned int i, offsets[] = { + AT803X_LOC_MAC_ADDR_32_47_OFFSET, + AT803X_LOC_MAC_ADDR_16_31_OFFSET, + AT803X_LOC_MAC_ADDR_0_15_OFFSET, + }; + + if (!ndev) + return; + + mac = (const u8 *) ndev->dev_addr; + + if (!is_valid_ether_addr(mac)) + return; + + for (i = 0; i < 3; i++) { + phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, + AT803X_DEVICE_ADDR); + phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, + offsets[i]); + phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, + AT803X_FUNC_DATA); + phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, + mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); + } +} + +static int at803x_config_init(struct phy_device *phydev) +{ + int val; + u32 features; + int status; + + features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI | + SUPPORTED_FIBRE | SUPPORTED_BNC; + + val = phy_read(phydev, MII_BMSR); + if (val < 0) + return val; + + if (val & BMSR_ANEGCAPABLE) + features |= SUPPORTED_Autoneg; + if (val & BMSR_100FULL) + features |= SUPPORTED_100baseT_Full; + if (val & BMSR_100HALF) + features |= SUPPORTED_100baseT_Half; + if (val & BMSR_10FULL) + features |= SUPPORTED_10baseT_Full; + if (val & BMSR_10HALF) + features |= SUPPORTED_10baseT_Half; + + if (val & BMSR_ESTATEN) { + val = phy_read(phydev, MII_ESTATUS); + if (val < 0) + return val; + + if (val & ESTATUS_1000_TFULL) + features |= SUPPORTED_1000baseT_Full; + if (val & ESTATUS_1000_THALF) + features |= SUPPORTED_1000baseT_Half; + } + + phydev->supported = features; + phydev->advertising = features; + + /* enable WOL */ + at803x_set_wol_mac_addr(phydev); + status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE); + status = phy_read(phydev, AT803X_INTR_STATUS); + + return 0; +} + +/* ATHEROS 8035 */ +static struct phy_driver at8035_driver = { + 
.phy_id = 0x004dd072, + .name = "Atheros 8035 ethernet", + .phy_id_mask = 0xffffffef, + .config_init = at803x_config_init, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .driver = { + .owner = THIS_MODULE, + }, +}; + +/* ATHEROS 8030 */ +static struct phy_driver at8030_driver = { + .phy_id = 0x004dd076, + .name = "Atheros 8030 ethernet", + .phy_id_mask = 0xffffffef, + .config_init = at803x_config_init, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .driver = { + .owner = THIS_MODULE, + }, +}; + +static int __init atheros_init(void) +{ + int ret; + + ret = phy_driver_register(&at8035_driver); + if (ret) + goto err1; + + ret = phy_driver_register(&at8030_driver); + if (ret) + goto err2; + + return 0; + +err2: + phy_driver_unregister(&at8035_driver); +err1: + return ret; +} + +static void __exit atheros_exit(void) +{ + phy_driver_unregister(&at8035_driver); + phy_driver_unregister(&at8030_driver); +} + +module_init(atheros_init); +module_exit(atheros_exit); + +static struct mdio_device_id __maybe_unused atheros_tbl[] = { + { 0x004dd076, 0xffffffef }, + { 0x004dd072, 0xffffffef }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, atheros_tbl); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 899274f..2ed1140 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; - int ret; + int ret, bus_id; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); - else + bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + } else { pdata = pdev->dev.platform_data; + bus_id = pdev->id; + } if (!pdata) return -ENODEV; - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); if (!new_bus) return -ENODEV; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d44cca3..ad86660 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1794,10 +1794,12 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GRO; - dev->hw_features = NETIF_F_HW_VLAN_TX | + dev->hw_features = TEAM_VLAN_FEATURES | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); dev->features |= dev->hw_features; } diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 9db0171..c5db428 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) if (last) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { - ret = team_dev_queue_xmit(team, last, - skb2); + ret = !team_dev_queue_xmit(team, last, + skb2); if (!sum_ret) sum_ret = ret; } @@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) } } if (last) { - ret = team_dev_queue_xmit(team, last, skb); + ret = !team_dev_queue_xmit(team, last, skb); if (!sum_ret) sum_ret = ret; } diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index c81e278..08d55b6 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -31,6 +31,7 @@ #include <linux/usb/cdc.h> #include 
<linux/usb/usbnet.h> #include <linux/gfp.h> +#include <linux/if_vlan.h> /* @@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf) /* no jumbogram (16K) support for now */ - dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a03de71..d012982 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -592,6 +592,32 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Novatel USB551L and MC551 - handled by qmi_wwan */ +{ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = NOVATEL_VENDOR_ID, + .idProduct = 0xB001, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = 0, +}, + +/* Novatel E362 - handled by qmi_wwan */ +{ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = NOVATEL_VENDOR_ID, + .idProduct = 0x9010, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = 0, +}, + /* * WHITELIST!!! * @@ -604,21 +630,6 @@ static const struct usb_device_id products [] = { * because of bugs/quirks in a given product (like Zaurus, above). */ { - /* Novatel USB551L */ - /* This match must come *before* the generic CDC-ETHER match so that - * we get FLAG_WWAN set on the device, since it's descriptors are - * generic CDC-ETHER. - */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR - | USB_DEVICE_ID_MATCH_PRODUCT - | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = NOVATEL_VENDOR_ID, - .idProduct = 0xB001, - .bInterfaceClass = USB_CLASS_COMM, - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, - .bInterfaceProtocol = USB_CDC_PROTO_NONE, - .driver_info = (unsigned long)&wwan_info, -}, { /* ZTE (Vodafone) K3805-Z */ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 4cd582a..74fab1a 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -540,10 +540,12 @@ advance: (ctx->ether_desc == NULL) || (ctx->control != intf)) goto error; - /* claim interfaces, if any */ - temp = usb_driver_claim_interface(driver, ctx->data, dev); - if (temp) - goto error; + /* claim data interface, if different from control */ + if (ctx->data != ctx->control) { + temp = usb_driver_claim_interface(driver, ctx->data, dev); + if (temp) + goto error; + } iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; @@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) tasklet_kill(&ctx->bh); + /* handle devices with combined control and data interface */ + if (ctx->control == ctx->data) + ctx->data = NULL; + /* disconnect master --> disconnect slave */ if (intf == ctx->control && ctx->data) { usb_set_intfdata(ctx->data, NULL); @@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Huawei NCM devices disguised as vendor specific */ + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), + .driver_info = (unsigned long)&wwan_info, + }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), + .driver_info = 
(unsigned long)&wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index a28a983..534d8be 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -62,6 +62,7 @@ #define USB_PRODUCT_IPAD 0x129a #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 +#define USB_PRODUCT_IPHONE_5 0x12a8 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6883c37..1ea91f4 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -369,18 +369,74 @@ static const struct usb_device_id products[] = { USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Novatel USB551L and MC551 */ + USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0xb001, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, + { /* Novatel E362 */ + USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9010, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ + {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, + {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, + {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0021, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x0025, 1)}, + {QMI_FIXED_INTF(0x19d2, 0x0031, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x0042, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x0049, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0052, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0058, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0113, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0118, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0121, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0123, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x0124, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0125, 6)}, + {QMI_FIXED_INTF(0x19d2, 0x0126, 5)}, + {QMI_FIXED_INTF(0x19d2, 0x0130, 1)}, + {QMI_FIXED_INTF(0x19d2, 0x0133, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0141, 5)}, {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */ + {QMI_FIXED_INTF(0x19d2, 0x0158, 3)}, {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ + {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ + {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ + {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, + {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1012, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ + 
{QMI_FIXED_INTF(0x19d2, 0x1021, 2)}, + {QMI_FIXED_INTF(0x19d2, 0x1245, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1247, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1252, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1254, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1255, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, + {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 7479a57..362cb8c 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx) /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = (phy_id << 11) | (idx << 6) | MII_READ_; + addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); @@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; + addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); @@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, } else { u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); skb_push(skb, 4); + cpu_to_le32s(&csum_preamble); memcpy(skb->data, &csum_preamble, 4); } } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f9819d1..edb81ed 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_err(dev->net, "kevent %d may have been dropped\n", work); - else + if (!schedule_work (&dev->kevent)) { + if (net_ratelimit()) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); + } else { netdev_dbg(dev->net, "kevent %d scheduled\n", work); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent); @@ -1158,6 +1160,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, usb_anchor_urb(urb, &dev->deferred); /* no use to process more packets */ netif_stop_queue(net); + usb_put_urb(urb); spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_dbg(dev->net, "Delaying transmission for resumption\n"); goto deferred; @@ -1310,6 +1313,8 @@ void usbnet_disconnect (struct usb_interface *intf) cancel_work_sync(&dev->kevent); + usb_scuttle_anchored_urbs(&dev->deferred); + if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index ce9d4f2..0ae1bcc 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -744,28 +744,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + u32 buf_size; - tbi = tq->buf_info + tq->tx_ring.next2fill; - tbi->map_type = VMXNET3_MAP_PAGE; - tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, - 0, skb_frag_size(frag), - DMA_TO_DEVICE); + buf_offset = 0; + len = skb_frag_size(frag); + while (len) { + tbi = tq->buf_info + tq->tx_ring.next2fill; + if (len < VMXNET3_MAX_TX_BUF_SIZE) { + buf_size = len; + dw2 |= len; + } else { + buf_size = VMXNET3_MAX_TX_BUF_SIZE; + /* spec says that for TxDesc.len, 0 == 2^14 */ + } + tbi->map_type = VMXNET3_MAP_PAGE; + tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, + buf_offset, buf_size, + DMA_TO_DEVICE); - tbi->len = skb_frag_size(frag); + tbi->len = buf_size; - gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; - BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); - gdesc->dword[3] = 0; + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); + gdesc->dword[2] = cpu_to_le32(dw2); + gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, - "txd[%u]: 0x%llu %u %u\n", - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); - vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); - dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + dev_dbg(&adapter->netdev->dev, + "txd[%u]: 0x%llu %u %u\n", + tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), + le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + + len -= buf_size; + buf_offset += buf_size; + } } ctx->eop_txd = gdesc; @@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb, } } +static int txd_estimate(const struct sk_buff *skb) +{ + int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + + count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); + } + return count; +} /* * Transmits a pkt thru a given tq @@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, union Vmxnet3_GenericDesc tempTxDesc; #endif - /* conservatively estimate # of descriptors to use */ - count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + - skb_shinfo(skb)->nr_frags + 1; + count = txd_estimate(skb); ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 607976c..8b5c619 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1,5 +1,5 @@ /* - * VXLAN: Virtual eXtensiable Local Area Network + * VXLAN: Virtual eXtensible Local Area Network * * Copyright (c) 2012 Vyatta Inc. * @@ -50,8 +50,8 @@ #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) -/* VLAN + IP header + UDP + VXLAN */ -#define VXLAN_HEADROOM (4 + 20 + 8 + 8) +/* IP header + UDP + VXLAN + Ethernet header */ +#define VXLAN_HEADROOM (20 + 8 + 8 + 14) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. 
*/ @@ -816,7 +816,7 @@ static void vxlan_cleanup(unsigned long arg) = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; - if (f->state == NUD_PERMANENT) + if (f->state & NUD_PERMANENT) continue; timeout = f->used + vxlan->age_interval * HZ; @@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; + + /* update header length based on lower device */ + dev->hard_header_len = lowerdev->hard_header_len + + VXLAN_HEADROOM; } if (data[IFLA_VXLAN_TOS]) diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3f575af..e9a3da5 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c index 192251a..282eede 100644 --- a/drivers/net/wireless/b43legacy/pio.c +++ b/drivers/net/wireless/b43legacy/pio.c @@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue) { struct b43legacy_pio_txpacket *packet, *tmp_packet; - tasklet_disable(&queue->txtask); + tasklet_kill(&queue->txtask); list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) free_txpacket(packet, 0); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index caa0110..fc24eb9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct page *page = skb_frag_page(frag); - tx->flags |= XEN_NETTXF_more_data; + len = skb_frag_size(frag); + offset = frag->page_offset; - id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); - np->tx_skbs[id].skb = skb_get(skb); - tx = RING_GET_REQUEST(&np->tx, prod++); - tx->id = id; - ref = gnttab_claim_grant_reference(&np->gref_tx_head); - BUG_ON((signed short)ref < 0); + /* Data must not cross a page boundary. 
*/ + BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); - mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); - gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, - mfn, GNTMAP_readonly); + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; - tx->gref = np->grant_tx_ref[id] = ref; - tx->offset = frag->page_offset; - tx->size = skb_frag_size(frag); - tx->flags = 0; + while (len > 0) { + unsigned long bytes; + + BUG_ON(offset >= PAGE_SIZE); + + bytes = PAGE_SIZE - offset; + if (bytes > len) + bytes = len; + + tx->flags |= XEN_NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, + np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(page)); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = bytes; + tx->flags = 0; + + offset += bytes; + len -= bytes; + + /* Next frame */ + if (offset == PAGE_SIZE && len) { + BUG_ON(!PageCompound(page)); + page++; + offset = 0; + } + } } np->tx.req_prod_pvt = prod; } +/* + * Count how many ring slots are required to send the frags of this + * skb. Each frag might be a compound page. + */ +static int xennet_count_skb_frag_slots(struct sk_buff *skb) +{ + int i, frags = skb_shinfo(skb)->nr_frags; + int pages = 0; + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + unsigned long size = skb_frag_size(frag); + unsigned long offset = frag->page_offset; + + /* Skip unused frames from start of page */ + offset &= ~PAGE_MASK; + + pages += PFN_UP(offset + size); + } + + return pages; +} + static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; @@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) grant_ref_t ref; unsigned long mfn; int notify; - int frags = skb_shinfo(skb)->nr_frags; + int slots; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; - frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); - if (unlikely(frags > MAX_SKB_FRAGS + 1)) { - printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", - frags); - dump_stack(); + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + + xennet_count_skb_frag_slots(skb); + if (unlikely(slots > MAX_SKB_FRAGS + 1)) { + net_alert_ratelimited( + "xennet: skb rides the rocket: %d slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || - (frags > 1 && !xennet_can_sg(dev)) || + (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; |
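
Annotation on the final hunk above (xen-netfront, xennet_start_xmit): the patch replaces the old per-fragment count with xennet_count_skb_frag_slots(), because a fragment backed by a compound page can straddle several PAGE_SIZE boundaries and therefore needs several ring slots, not one. Below is a minimal, standalone userspace C sketch of that slot-counting arithmetic, assuming 4 KiB pages; the frag struct, the helper name and the example offsets/sizes are illustrative only and are not part of the driver.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct frag {
	unsigned long offset;	/* offset into a (possibly compound) page */
	unsigned long size;	/* fragment length in bytes */
};

/* Slots needed by one fragment: skip whole unused leading pages, then
 * count how many PAGE_SIZE pieces the remaining offset+size span covers
 * (the same arithmetic as PFN_UP(offset + size) in the patch). */
static unsigned long frag_slots(const struct frag *f)
{
	unsigned long offset = f->offset & ~PAGE_MASK;	/* in-page offset */

	return DIV_ROUND_UP(offset + f->size, PAGE_SIZE);
}

int main(void)
{
	/* illustrative linear area and fragments, not taken from a real skb */
	unsigned long head_offset = 100, head_len = 1400;
	struct frag frags[] = {
		{ .offset = 4000, .size = 200 },	/* crosses a page boundary: 2 slots */
		{ .offset = 8192, .size = 16384 },	/* 16K inside a compound page: 4 slots */
	};
	unsigned long slots;
	unsigned int i;

	/* linear (headlen) part, then one count per fragment */
	slots = DIV_ROUND_UP(head_offset + head_len, PAGE_SIZE);
	for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
		slots += frag_slots(&frags[i]);

	printf("ring slots required: %lu\n", slots);
	return 0;
}

With these example values the program prints 7 slots, whereas counting one slot per fragment plus the linear part would give only 3; that undercount is exactly why the old "frags > MAX_SKB_FRAGS + 1" check could let through an skb that xennet_make_frags() then had to split across more ring entries than expected.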